Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c | 12
-rw-r--r--  net/8021q/vlan_dev.c | 2
-rw-r--r--  net/Kconfig | 2
-rw-r--r--  net/appletalk/ddp.c | 79
-rw-r--r--  net/atm/lec.c | 3161
-rw-r--r--  net/atm/lec.h | 171
-rw-r--r--  net/atm/lec_arpc.h | 146
-rw-r--r--  net/atm/mpc.c | 13
-rw-r--r--  net/atm/mpoa_caches.c | 12
-rw-r--r--  net/bluetooth/af_bluetooth.c | 40
-rw-r--r--  net/bluetooth/bnep/core.c | 24
-rw-r--r--  net/bluetooth/bnep/sock.c | 69
-rw-r--r--  net/bluetooth/cmtp/sock.c | 35
-rw-r--r--  net/bluetooth/hci_conn.c | 45
-rw-r--r--  net/bluetooth/hci_core.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 41
-rw-r--r--  net/bluetooth/hci_sock.c | 2
-rw-r--r--  net/bluetooth/hci_sysfs.c | 148
-rw-r--r--  net/bluetooth/hidp/core.c | 31
-rw-r--r--  net/bluetooth/hidp/sock.c | 80
-rw-r--r--  net/bluetooth/l2cap.c | 5
-rw-r--r--  net/bluetooth/rfcomm/core.c | 5
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 6
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 24
-rw-r--r--  net/bluetooth/sco.c | 6
-rw-r--r--  net/bridge/br_fdb.c | 7
-rw-r--r--  net/bridge/br_if.c | 4
-rw-r--r--  net/bridge/br_private.h | 2
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_arpreply.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_mark.c | 21
-rw-r--r--  net/compat.c | 3
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/ethtool.c | 32
-rw-r--r--  net/core/fib_rules.c | 1
-rw-r--r--  net/core/flow.c | 42
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/net-sysfs.c | 5
-rw-r--r--  net/core/netpoll.c | 8
-rw-r--r--  net/core/pktgen.c | 321
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/scm.c | 3
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/core/sock.c | 2
-rw-r--r--  net/core/utils.c | 118
-rw-r--r--  net/core/wireless.c | 67
-rw-r--r--  net/dccp/ipv4.c | 8
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 8
-rw-r--r--  net/ipv4/Kconfig | 15
-rw-r--r--  net/ipv4/Makefile | 1
-rw-r--r--  net/ipv4/af_inet.c | 8
-rw-r--r--  net/ipv4/arp.c | 42
-rw-r--r--  net/ipv4/cipso_ipv4.c | 48
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 42
-rw-r--r--  net/ipv4/esp4.c | 26
-rw-r--r--  net/ipv4/fib_frontend.c | 40
-rw-r--r--  net/ipv4/fib_hash.c | 22
-rw-r--r--  net/ipv4/fib_lookup.h | 6
-rw-r--r--  net/ipv4/fib_rules.c | 24
-rw-r--r--  net/ipv4/fib_semantics.c | 33
-rw-r--r--  net/ipv4/fib_trie.c | 12
-rw-r--r--  net/ipv4/icmp.c | 10
-rw-r--r--  net/ipv4/igmp.c | 68
-rw-r--r--  net/ipv4/inet_connection_sock.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 12
-rw-r--r--  net/ipv4/inet_hashtables.c | 12
-rw-r--r--  net/ipv4/inetpeer.c | 33
-rw-r--r--  net/ipv4/ip_fragment.c | 15
-rw-r--r--  net/ipv4/ip_gre.c | 4
-rw-r--r--  net/ipv4/ip_options.c | 26
-rw-r--r--  net/ipv4/ip_output.c | 6
-rw-r--r--  net/ipv4/ip_sockglue.c | 4
-rw-r--r--  net/ipv4/ipcomp.c | 7
-rw-r--r--  net/ipv4/ipconfig.c | 16
-rw-r--r--  net/ipv4/ipmr.c | 4
-rw-r--r--  net/ipv4/ipvs/Kconfig | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 24
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 24
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 26
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_ftp.c | 29
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 8
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 8
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_tcp.c | 10
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_udp.c | 14
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 20
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c | 2
-rw-r--r--  net/ipv4/multipath_wrandom.c | 14
-rw-r--r--  net/ipv4/netfilter.c | 13
-rw-r--r--  net/ipv4/netfilter/Kconfig | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_amanda.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_ftp.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323.c | 84
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_irc.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c | 154
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_sip.c | 16
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_tftp.c | 8
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 14
-rw-r--r--  net/ipv4/netfilter/ip_nat_ftp.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper.c | 39
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper_h323.c | 16
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper_pptp.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_icmp.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_tcp.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_udp.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_nat_rule.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_nat_sip.c | 8
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 14
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_NETMAP.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 101
-rw-r--r--  net/ipv4/netfilter/ipt_SAME.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c | 17
-rw-r--r--  net/ipv4/netfilter/ipt_TOS.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_hashlimit.c | 16
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 15
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 5
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 111
-rw-r--r--  net/ipv4/tcp_input.c | 32
-rw-r--r--  net/ipv4/tcp_ipv4.c | 28
-rw-r--r--  net/ipv4/tcp_lp.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 63
-rw-r--r--  net/ipv4/tcp_probe.c | 6
-rw-r--r--  net/ipv4/udp.c | 34
-rw-r--r--  net/ipv4/xfrm4_input.c | 4
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c | 139
-rw-r--r--  net/ipv4/xfrm4_policy.c | 8
-rw-r--r--  net/ipv4/xfrm4_state.c | 4
-rw-r--r--  net/ipv6/Kconfig | 39
-rw-r--r--  net/ipv6/Makefile | 4
-rw-r--r--  net/ipv6/addrconf.c | 22
-rw-r--r--  net/ipv6/af_inet6.c | 4
-rw-r--r--  net/ipv6/fib6_rules.c | 12
-rw-r--r--  net/ipv6/inet6_hashtables.c | 8
-rw-r--r--  net/ipv6/ipcomp6.c | 9
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 3
-rw-r--r--  net/ipv6/mip6.c | 1
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/route.c | 66
-rw-r--r--  net/ipv6/sit.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 15
-rw-r--r--  net/ipv6/udp.c | 64
-rw-r--r--  net/ipv6/xfrm6_input.c | 4
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c | 107
-rw-r--r--  net/ipv6/xfrm6_policy.c | 14
-rw-r--r--  net/ipv6/xfrm6_state.c | 4
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/irda/af_irda.c | 12
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/irda/iriap.c | 9
-rw-r--r--  net/irda/iriap_event.c | 2
-rw-r--r--  net/irda/irias_object.c | 2
-rw-r--r--  net/irda/irlan/irlan_common.c | 46
-rw-r--r--  net/irda/irlan/irlan_provider.c | 12
-rw-r--r--  net/irda/irlap_frame.c | 59
-rw-r--r--  net/irda/irlmp.c | 2
-rw-r--r--  net/irda/irttp.c | 14
-rw-r--r--  net/key/af_key.c | 13
-rw-r--r--  net/netfilter/Kconfig | 6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 72
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 2
-rw-r--r--  net/netfilter/xt_connmark.c | 2
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 59
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 56
-rw-r--r--  net/netlabel/netlabel_domainhash.h | 8
-rw-r--r--  net/netlabel/netlabel_kapi.c | 2
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 25
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 48
-rw-r--r--  net/netlabel/netlabel_user.c | 41
-rw-r--r--  net/netlabel/netlabel_user.h | 18
-rw-r--r--  net/rxrpc/transport.c | 3
-rw-r--r--  net/sched/cls_api.c | 4
-rw-r--r--  net/sched/cls_basic.c | 2
-rw-r--r--  net/sched/estimator.c | 196
-rw-r--r--  net/sched/sch_api.c | 16
-rw-r--r--  net/sched/sch_generic.c | 66
-rw-r--r--  net/sched/sch_htb.c | 5
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/ipv6.c | 10
-rw-r--r--  net/sctp/output.c | 10
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sctp/proc.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 10
-rw-r--r--  net/sctp/socket.c | 28
-rw-r--r--  net/sctp/ulpevent.c | 25
-rw-r--r--  net/sctp/ulpqueue.c | 2
-rw-r--r--  net/socket.c | 87
-rw-r--r--  net/sunrpc/auth.c | 12
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 35
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c | 4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 148
-rw-r--r--  net/sunrpc/auth_null.c | 8
-rw-r--r--  net/sunrpc/auth_unix.c | 10
-rw-r--r--  net/sunrpc/clnt.c | 27
-rw-r--r--  net/sunrpc/pmap_clnt.c | 20
-rw-r--r--  net/sunrpc/rpc_pipe.c | 10
-rw-r--r--  net/sunrpc/sched.c | 8
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 2
-rw-r--r--  net/sunrpc/svc.c | 604
-rw-r--r--  net/sunrpc/svcauth.c | 4
-rw-r--r--  net/sunrpc/svcauth_unix.c | 90
-rw-r--r--  net/sunrpc/svcsock.c | 468
-rw-r--r--  net/sunrpc/xdr.c | 54
-rw-r--r--  net/sunrpc/xprt.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 5
-rw-r--r--  net/tipc/bearer.c | 8
-rw-r--r--  net/tipc/config.c | 5
-rw-r--r--  net/tipc/core.c | 4
-rw-r--r--  net/tipc/core.h | 30
-rw-r--r--  net/tipc/dbg.c | 136
-rw-r--r--  net/tipc/dbg.h | 15
-rw-r--r--  net/tipc/discover.c | 39
-rw-r--r--  net/tipc/link.c | 13
-rw-r--r--  net/tipc/name_distr.c | 2
-rw-r--r--  net/tipc/node.c | 2
-rw-r--r--  net/tipc/port.c | 7
-rw-r--r--  net/tipc/socket.c | 19
-rw-r--r--  net/tipc/subscr.c | 99
-rw-r--r--  net/xfrm/xfrm_hash.h | 11
-rw-r--r--  net/xfrm/xfrm_input.c | 8
-rw-r--r--  net/xfrm/xfrm_policy.c | 108
-rw-r--r--  net/xfrm/xfrm_state.c | 70
-rw-r--r--  net/xfrm/xfrm_user.c | 10
247 files changed, 5958 insertions(+), 4245 deletions(-)
diff --git a/net/802/tr.c b/net/802/tr.c
index d7d8f40c4fed..829deb41ce81 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -164,7 +164,7 @@ static int tr_rebuild_header(struct sk_buff *skb)
 	 */
 
 	if(trllc->ethertype != htons(ETH_P_IP)) {
-		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n",(unsigned int)htons(trllc->ethertype));
+		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
 		return 0;
 	}
 
@@ -186,7 +186,7 @@ static int tr_rebuild_header(struct sk_buff *skb)
  * it via SNAP.
  */
 
-unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
+__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
 
 	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
@@ -229,15 +229,15 @@ unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
 	 */
 
 	if (trllc->dsap == EXTENDED_SAP &&
-	    (trllc->ethertype == ntohs(ETH_P_IP) ||
-	     trllc->ethertype == ntohs(ETH_P_IPV6) ||
-	     trllc->ethertype == ntohs(ETH_P_ARP)))
+	    (trllc->ethertype == htons(ETH_P_IP) ||
+	     trllc->ethertype == htons(ETH_P_IPV6) ||
+	     trllc->ethertype == htons(ETH_P_ARP)))
 	{
 		skb_pull(skb, sizeof(struct trllc));
 		return trllc->ethertype;
 	}
 
-	return ntohs(ETH_P_TR_802_2);
+	return htons(ETH_P_TR_802_2);
 }
 
 /*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index da9cfe927158..60a508eb1945 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -62,7 +62,7 @@ int vlan_dev_rebuild_header(struct sk_buff *skb)
 	default:
 		printk(VLAN_DBG
 		       "%s: unable to resolve type %X addresses.\n",
-		       dev->name, (int)veth->h_vlan_encapsulated_proto);
+		       dev->name, ntohs(veth->h_vlan_encapsulated_proto));
 
 		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
 		break;
diff --git a/net/Kconfig b/net/Kconfig
index 6528a935622c..a81aca43932f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -231,7 +231,7 @@ config NET_TCPPROBE
 	  TCP congestion avoidance modules. If you don't understand
 	  what was just said, you don't need it: say N.
 
-	  Documentation on how to use the packet generator can be found
+	  Documentation on how to use TCP connection probing can be found
 	  at http://linux-net.osdl.org/index.php/TcpProbe
 
 	  To compile this code as a module, choose M here: the
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 96dc6bb52d14..708e2e0371af 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1002,7 +1002,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 	return sum;
 }
 
-static unsigned short atalk_checksum(const struct sk_buff *skb, int len)
+static __be16 atalk_checksum(const struct sk_buff *skb, int len)
 {
 	unsigned long sum;
 
@@ -1010,7 +1010,7 @@ static unsigned short atalk_checksum(const struct sk_buff *skb, int len)
 	sum = atalk_sum_skb(skb, 4, len-4, 0);
 
 	/* Use 0xFFFF for 0. 0 itself means none */
-	return sum ? htons((unsigned short)sum) : 0xFFFF;
+	return sum ? htons((unsigned short)sum) : htons(0xFFFF);
 }
 
 static struct proto ddp_proto = {
@@ -1289,7 +1289,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
 #endif
 
 static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
-			       struct ddpehdr *ddp, struct ddpebits *ddphv,
+			       struct ddpehdr *ddp, __u16 len_hops,
 			       int origlen)
 {
 	struct atalk_route *rt;
@@ -1317,10 +1317,12 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
 
 	/* Route the packet */
 	rt = atrtr_find(&ta);
-	if (!rt || ddphv->deh_hops == DDP_MAXHOPS)
+	/* increment hops count */
+	len_hops += 1 << 10;
+	if (!rt || !(len_hops & (15 << 10)))
 		goto free_it;
+
 	/* FIXME: use skb->cb to be able to use shared skbs */
-	ddphv->deh_hops++;
 
 	/*
 	 * Route goes through another gateway, so set the target to the
@@ -1335,11 +1337,10 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
 	/* Fix up skb->len field */
 	skb_trim(skb, min_t(unsigned int, origlen,
 			    (rt->dev->hard_header_len +
-			     ddp_dl->header_length + ddphv->deh_len)));
+			     ddp_dl->header_length + (len_hops & 1023))));
 
-	/* Mend the byte order */
 	/* FIXME: use skb->cb to be able to use shared skbs */
-	*((__u16 *)ddp) = ntohs(*((__u16 *)ddphv));
+	ddp->deh_len_hops = htons(len_hops);
 
 	/*
 	 * Send the buffer onwards
@@ -1394,7 +1395,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct atalk_iface *atif;
 	struct sockaddr_at tosat;
 	int origlen;
-	struct ddpebits ddphv;
+	__u16 len_hops;
 
 	/* Don't mangle buffer if shared */
 	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
@@ -1406,16 +1407,11 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	ddp = ddp_hdr(skb);
 
-	/*
-	 * Fix up the length field [Ok this is horrible but otherwise
-	 * I end up with unions of bit fields and messy bit field order
-	 * compiler/endian dependencies..]
-	 */
-	*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
+	len_hops = ntohs(ddp->deh_len_hops);
 
 	/* Trim buffer in case of stray trailing data */
 	origlen = skb->len;
-	skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len));
+	skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023));
 
 	/*
 	 * Size check to see if ddp->deh_len was crap
@@ -1430,7 +1426,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * valid for net byte orders all over the networking code...
 	 */
 	if (ddp->deh_sum &&
-	    atalk_checksum(skb, ddphv.deh_len) != ddp->deh_sum)
+	    atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
 		/* Not a valid AppleTalk frame - dustbin time */
 		goto freeit;
 
@@ -1444,7 +1440,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* Not ours, so we route the packet via the correct
 		 * AppleTalk iface
 		 */
-		atalk_route_packet(skb, dev, ddp, &ddphv, origlen);
+		atalk_route_packet(skb, dev, ddp, len_hops, origlen);
 		goto out;
 	}
 
@@ -1489,7 +1485,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* Find our address */
 		struct atalk_addr *ap = atalk_find_dev_addr(dev);
 
-		if (!ap || skb->len < sizeof(struct ddpshdr))
+		if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
 			goto freeit;
 
 		/* Don't mangle buffer if shared */
@@ -1519,11 +1515,8 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		/*
 		 * Not sure about this bit...
 		 */
-		ddp->deh_len = skb->len;
-		ddp->deh_hops = DDP_MAXHOPS;	/* Non routable, so force a drop
-						   if we slip up later */
-		/* Mend the byte order */
-		*((__u16 *)ddp) = htons(*((__u16 *)ddp));
+		/* Non routable, so force a drop if we slip up later */
+		ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
 	}
 	skb->h.raw = skb->data;
 
@@ -1622,16 +1615,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 	SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
 
 	ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr));
-	ddp->deh_pad = 0;
-	ddp->deh_hops = 0;
-	ddp->deh_len = len + sizeof(*ddp);
-	/*
-	 * Fix up the length field [Ok this is horrible but otherwise
-	 * I end up with unions of bit fields and messy bit field order
-	 * compiler/endian dependencies..
-	 */
-	*((__u16 *)ddp) = ntohs(*((__u16 *)ddp));
-
+	ddp->deh_len_hops = htons(len + sizeof(*ddp));
 	ddp->deh_dnet = usat->sat_addr.s_net;
 	ddp->deh_snet = at->src_net;
 	ddp->deh_dnode = usat->sat_addr.s_node;
@@ -1712,8 +1696,8 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 	struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
 	struct ddpehdr *ddp;
 	int copied = 0;
+	int offset = 0;
 	int err = 0;
-	struct ddpebits ddphv;
 	struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
 						flags & MSG_DONTWAIT, &err);
 	if (!skb)
@@ -1721,25 +1705,18 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 
 	/* FIXME: use skb->cb to be able to use shared skbs */
 	ddp = ddp_hdr(skb);
-	*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
+	copied = ntohs(ddp->deh_len_hops) & 1023;
 
-	if (sk->sk_type == SOCK_RAW) {
-		copied = ddphv.deh_len;
-		if (copied > size) {
-			copied = size;
-			msg->msg_flags |= MSG_TRUNC;
-		}
+	if (sk->sk_type != SOCK_RAW) {
+		offset = sizeof(*ddp);
+		copied -= offset;
+	}
 
-		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
-	} else {
-		copied = ddphv.deh_len - sizeof(*ddp);
-		if (copied > size) {
-			copied = size;
-			msg->msg_flags |= MSG_TRUNC;
-		}
-		err = skb_copy_datagram_iovec(skb, sizeof(*ddp),
-					      msg->msg_iov, copied);
+	if (copied > size) {
+		copied = size;
+		msg->msg_flags |= MSG_TRUNC;
 	}
+	err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
 
 	if (!err) {
 		if (sat) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b4aa489849df..66c57c1091a8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * lec.c: Lan Emulation driver 2 * lec.c: Lan Emulation driver
3 * Marko Kiiskila mkiiskila@yahoo.com
4 * 3 *
4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
@@ -38,7 +38,7 @@
38#include <linux/if_bridge.h> 38#include <linux/if_bridge.h>
39#include "../bridge/br_private.h" 39#include "../bridge/br_private.h"
40 40
41static unsigned char bridge_ula_lec[] = {0x01, 0x80, 0xc2, 0x00, 0x00}; 41static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
42#endif 42#endif
43 43
44/* Modular too */ 44/* Modular too */
@@ -55,38 +55,41 @@ static unsigned char bridge_ula_lec[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
55#define DPRINTK(format,args...) 55#define DPRINTK(format,args...)
56#endif 56#endif
57 57
58#define DUMP_PACKETS 0 /* 0 = None, 58#define DUMP_PACKETS 0 /*
59 * 1 = 30 first bytes 59 * 0 = None,
60 * 2 = Whole packet 60 * 1 = 30 first bytes
61 */ 61 * 2 = Whole packet
62 */
62 63
63#define LEC_UNRES_QUE_LEN 8 /* number of tx packets to queue for a 64#define LEC_UNRES_QUE_LEN 8 /*
64 single destination while waiting for SVC */ 65 * number of tx packets to queue for a
66 * single destination while waiting for SVC
67 */
65 68
66static int lec_open(struct net_device *dev); 69static int lec_open(struct net_device *dev);
67static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); 70static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev);
68static int lec_close(struct net_device *dev); 71static int lec_close(struct net_device *dev);
69static struct net_device_stats *lec_get_stats(struct net_device *dev); 72static struct net_device_stats *lec_get_stats(struct net_device *dev);
70static void lec_init(struct net_device *dev); 73static void lec_init(struct net_device *dev);
71static struct lec_arp_table* lec_arp_find(struct lec_priv *priv, 74static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
72 unsigned char *mac_addr); 75 unsigned char *mac_addr);
73static int lec_arp_remove(struct lec_priv *priv, 76static int lec_arp_remove(struct lec_priv *priv,
74 struct lec_arp_table *to_remove); 77 struct lec_arp_table *to_remove);
75/* LANE2 functions */ 78/* LANE2 functions */
76static void lane2_associate_ind (struct net_device *dev, u8 *mac_address, 79static void lane2_associate_ind(struct net_device *dev, u8 *mac_address,
77 u8 *tlvs, u32 sizeoftlvs); 80 u8 *tlvs, u32 sizeoftlvs);
78static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, 81static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
79 u8 **tlvs, u32 *sizeoftlvs); 82 u8 **tlvs, u32 *sizeoftlvs);
80static int lane2_associate_req (struct net_device *dev, u8 *lan_dst, 83static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
81 u8 *tlvs, u32 sizeoftlvs); 84 u8 *tlvs, u32 sizeoftlvs);
82 85
83static int lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, 86static int lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
84 unsigned long permanent); 87 unsigned long permanent);
85static void lec_arp_check_empties(struct lec_priv *priv, 88static void lec_arp_check_empties(struct lec_priv *priv,
86 struct atm_vcc *vcc, struct sk_buff *skb); 89 struct atm_vcc *vcc, struct sk_buff *skb);
87static void lec_arp_destroy(struct lec_priv *priv); 90static void lec_arp_destroy(struct lec_priv *priv);
88static void lec_arp_init(struct lec_priv *priv); 91static void lec_arp_init(struct lec_priv *priv);
89static struct atm_vcc* lec_arp_resolve(struct lec_priv *priv, 92static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
90 unsigned char *mac_to_find, 93 unsigned char *mac_to_find,
91 int is_rdesc, 94 int is_rdesc,
92 struct lec_arp_table **ret_entry); 95 struct lec_arp_table **ret_entry);
@@ -100,16 +103,30 @@ static void lec_set_flush_tran_id(struct lec_priv *priv,
100 unsigned long tran_id); 103 unsigned long tran_id);
101static void lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, 104static void lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
102 struct atm_vcc *vcc, 105 struct atm_vcc *vcc,
103 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb)); 106 void (*old_push) (struct atm_vcc *vcc,
107 struct sk_buff *skb));
104static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); 108static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
105 109
110/* must be done under lec_arp_lock */
111static inline void lec_arp_hold(struct lec_arp_table *entry)
112{
113 atomic_inc(&entry->usage);
114}
115
116static inline void lec_arp_put(struct lec_arp_table *entry)
117{
118 if (atomic_dec_and_test(&entry->usage))
119 kfree(entry);
120}
121
122
106static struct lane2_ops lane2_ops = { 123static struct lane2_ops lane2_ops = {
107 lane2_resolve, /* resolve, spec 3.1.3 */ 124 lane2_resolve, /* resolve, spec 3.1.3 */
108 lane2_associate_req, /* associate_req, spec 3.1.4 */ 125 lane2_associate_req, /* associate_req, spec 3.1.4 */
109 NULL /* associate indicator, spec 3.1.5 */ 126 NULL /* associate indicator, spec 3.1.5 */
110}; 127};
111 128
112static unsigned char bus_mac[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff}; 129static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
113 130
114/* Device structures */ 131/* Device structures */
115static struct net_device *dev_lec[MAX_LEC_ITF]; 132static struct net_device *dev_lec[MAX_LEC_ITF];
@@ -117,36 +134,39 @@ static struct net_device *dev_lec[MAX_LEC_ITF];
117#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 134#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
118static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) 135static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
119{ 136{
120 struct ethhdr *eth; 137 struct ethhdr *eth;
121 char *buff; 138 char *buff;
122 struct lec_priv *priv; 139 struct lec_priv *priv;
123 140
124 /* Check if this is a BPDU. If so, ask zeppelin to send 141 /*
125 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit 142 * Check if this is a BPDU. If so, ask zeppelin to send
126 * as the Config BPDU has */ 143 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
127 eth = (struct ethhdr *)skb->data; 144 * as the Config BPDU has
128 buff = skb->data + skb->dev->hard_header_len; 145 */
129 if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) { 146 eth = (struct ethhdr *)skb->data;
147 buff = skb->data + skb->dev->hard_header_len;
148 if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
130 struct sock *sk; 149 struct sock *sk;
131 struct sk_buff *skb2; 150 struct sk_buff *skb2;
132 struct atmlec_msg *mesg; 151 struct atmlec_msg *mesg;
133 152
134 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 153 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
135 if (skb2 == NULL) return; 154 if (skb2 == NULL)
136 skb2->len = sizeof(struct atmlec_msg); 155 return;
137 mesg = (struct atmlec_msg *)skb2->data; 156 skb2->len = sizeof(struct atmlec_msg);
138 mesg->type = l_topology_change; 157 mesg = (struct atmlec_msg *)skb2->data;
139 buff += 4; 158 mesg->type = l_topology_change;
140 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ 159 buff += 4;
141 160 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */
142 priv = (struct lec_priv *)dev->priv; 161
143 atm_force_charge(priv->lecd, skb2->truesize); 162 priv = (struct lec_priv *)dev->priv;
163 atm_force_charge(priv->lecd, skb2->truesize);
144 sk = sk_atm(priv->lecd); 164 sk = sk_atm(priv->lecd);
145 skb_queue_tail(&sk->sk_receive_queue, skb2); 165 skb_queue_tail(&sk->sk_receive_queue, skb2);
146 sk->sk_data_ready(sk, skb2->len); 166 sk->sk_data_ready(sk, skb2->len);
147 } 167 }
148 168
149 return; 169 return;
150} 170}
151#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 171#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
152 172
@@ -162,36 +182,35 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
162#ifdef CONFIG_TR 182#ifdef CONFIG_TR
163static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) 183static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
164{ 184{
165 struct trh_hdr *trh; 185 struct trh_hdr *trh;
166 int riflen, num_rdsc; 186 int riflen, num_rdsc;
167 187
168 trh = (struct trh_hdr *)packet; 188 trh = (struct trh_hdr *)packet;
169 if (trh->daddr[0] & (uint8_t)0x80) 189 if (trh->daddr[0] & (uint8_t) 0x80)
170 return bus_mac; /* multicast */ 190 return bus_mac; /* multicast */
171 191
172 if (trh->saddr[0] & TR_RII) { 192 if (trh->saddr[0] & TR_RII) {
173 riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; 193 riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
174 if ((ntohs(trh->rcf) >> 13) != 0) 194 if ((ntohs(trh->rcf) >> 13) != 0)
175 return bus_mac; /* ARE or STE */ 195 return bus_mac; /* ARE or STE */
176 } 196 } else
177 else 197 return trh->daddr; /* not source routed */
178 return trh->daddr; /* not source routed */ 198
179 199 if (riflen < 6)
180 if (riflen < 6) 200 return trh->daddr; /* last hop, source routed */
181 return trh->daddr; /* last hop, source routed */ 201
182 202 /* riflen is 6 or more, packet has more than one route descriptor */
183 /* riflen is 6 or more, packet has more than one route descriptor */ 203 num_rdsc = (riflen / 2) - 1;
184 num_rdsc = (riflen/2) - 1; 204 memset(rdesc, 0, ETH_ALEN);
185 memset(rdesc, 0, ETH_ALEN); 205 /* offset 4 comes from LAN destination field in LE control frames */
186 /* offset 4 comes from LAN destination field in LE control frames */ 206 if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
187 if (trh->rcf & htons((uint16_t)TR_RCF_DIR_BIT)) 207 memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(uint16_t));
188 memcpy(&rdesc[4], &trh->rseg[num_rdsc-2], sizeof(uint16_t)); 208 else {
189 else { 209 memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t));
190 memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t)); 210 rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
191 rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); 211 }
192 } 212
193 213 return NULL;
194 return NULL;
195} 214}
196#endif /* CONFIG_TR */ 215#endif /* CONFIG_TR */
197 216
@@ -204,15 +223,14 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
204 * there is non-reboot way to recover if something goes wrong. 223 * there is non-reboot way to recover if something goes wrong.
205 */ 224 */
206 225
207static int 226static int lec_open(struct net_device *dev)
208lec_open(struct net_device *dev)
209{ 227{
210 struct lec_priv *priv = (struct lec_priv *)dev->priv; 228 struct lec_priv *priv = (struct lec_priv *)dev->priv;
211 229
212 netif_start_queue(dev); 230 netif_start_queue(dev);
213 memset(&priv->stats,0,sizeof(struct net_device_stats)); 231 memset(&priv->stats, 0, sizeof(struct net_device_stats));
214 232
215 return 0; 233 return 0;
216} 234}
217 235
218static __inline__ void 236static __inline__ void
@@ -231,160 +249,166 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv)
231 priv->stats.tx_bytes += skb->len; 249 priv->stats.tx_bytes += skb->len;
232} 250}
233 251
234static void 252static void lec_tx_timeout(struct net_device *dev)
235lec_tx_timeout(struct net_device *dev)
236{ 253{
237 printk(KERN_INFO "%s: tx timeout\n", dev->name); 254 printk(KERN_INFO "%s: tx timeout\n", dev->name);
238 dev->trans_start = jiffies; 255 dev->trans_start = jiffies;
239 netif_wake_queue(dev); 256 netif_wake_queue(dev);
240} 257}
241 258
242static int 259static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
243lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
244{ 260{
245 struct sk_buff *skb2; 261 struct sk_buff *skb2;
246 struct lec_priv *priv = (struct lec_priv *)dev->priv; 262 struct lec_priv *priv = (struct lec_priv *)dev->priv;
247 struct lecdatahdr_8023 *lec_h; 263 struct lecdatahdr_8023 *lec_h;
248 struct atm_vcc *vcc; 264 struct atm_vcc *vcc;
249 struct lec_arp_table *entry; 265 struct lec_arp_table *entry;
250 unsigned char *dst; 266 unsigned char *dst;
251 int min_frame_size; 267 int min_frame_size;
252#ifdef CONFIG_TR 268#ifdef CONFIG_TR
253 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ 269 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
254#endif 270#endif
255 int is_rdesc; 271 int is_rdesc;
256#if DUMP_PACKETS > 0 272#if DUMP_PACKETS > 0
257 char buf[300]; 273 char buf[300];
258 int i=0; 274 int i = 0;
259#endif /* DUMP_PACKETS >0 */ 275#endif /* DUMP_PACKETS >0 */
260 276
261 DPRINTK("lec_start_xmit called\n"); 277 DPRINTK("lec_start_xmit called\n");
262 if (!priv->lecd) { 278 if (!priv->lecd) {
263 printk("%s:No lecd attached\n",dev->name); 279 printk("%s:No lecd attached\n", dev->name);
264 priv->stats.tx_errors++; 280 priv->stats.tx_errors++;
265 netif_stop_queue(dev); 281 netif_stop_queue(dev);
266 return -EUNATCH; 282 return -EUNATCH;
267 } 283 }
268 284
269 DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 285 DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
270 (long)skb->head, (long)skb->data, (long)skb->tail, 286 (long)skb->head, (long)skb->data, (long)skb->tail,
271 (long)skb->end); 287 (long)skb->end);
272#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 288#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
273 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) 289 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
274 lec_handle_bridge(skb, dev); 290 lec_handle_bridge(skb, dev);
275#endif 291#endif
276 292
277 /* Make sure we have room for lec_id */ 293 /* Make sure we have room for lec_id */
278 if (skb_headroom(skb) < 2) { 294 if (skb_headroom(skb) < 2) {
279 295
280 DPRINTK("lec_start_xmit: reallocating skb\n"); 296 DPRINTK("lec_start_xmit: reallocating skb\n");
281 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 297 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
282 kfree_skb(skb); 298 kfree_skb(skb);
283 if (skb2 == NULL) return 0; 299 if (skb2 == NULL)
284 skb = skb2; 300 return 0;
285 } 301 skb = skb2;
286 skb_push(skb, 2); 302 }
303 skb_push(skb, 2);
287 304
288 /* Put le header to place, works for TokenRing too */ 305 /* Put le header to place, works for TokenRing too */
289 lec_h = (struct lecdatahdr_8023*)skb->data; 306 lec_h = (struct lecdatahdr_8023 *)skb->data;
290 lec_h->le_header = htons(priv->lecid); 307 lec_h->le_header = htons(priv->lecid);
291 308
292#ifdef CONFIG_TR 309#ifdef CONFIG_TR
293 /* Ugly. Use this to realign Token Ring packets for 310 /*
294 * e.g. PCA-200E driver. */ 311 * Ugly. Use this to realign Token Ring packets for
295 if (priv->is_trdev) { 312 * e.g. PCA-200E driver.
296 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 313 */
297 kfree_skb(skb); 314 if (priv->is_trdev) {
298 if (skb2 == NULL) return 0; 315 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
299 skb = skb2; 316 kfree_skb(skb);
300 } 317 if (skb2 == NULL)
318 return 0;
319 skb = skb2;
320 }
301#endif 321#endif
302 322
303#if DUMP_PACKETS > 0 323#if DUMP_PACKETS > 0
304 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name, 324 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
305 skb->len, priv->lecid); 325 skb->len, priv->lecid);
306#if DUMP_PACKETS >= 2 326#if DUMP_PACKETS >= 2
307 for(i=0;i<skb->len && i <99;i++) { 327 for (i = 0; i < skb->len && i < 99; i++) {
308 sprintf(buf+i*3,"%2.2x ",0xff&skb->data[i]); 328 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
309 } 329 }
310#elif DUMP_PACKETS >= 1 330#elif DUMP_PACKETS >= 1
311 for(i=0;i<skb->len && i < 30;i++) { 331 for (i = 0; i < skb->len && i < 30; i++) {
312 sprintf(buf+i*3,"%2.2x ", 0xff&skb->data[i]); 332 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
313 } 333 }
314#endif /* DUMP_PACKETS >= 1 */ 334#endif /* DUMP_PACKETS >= 1 */
315 if (i==skb->len) 335 if (i == skb->len)
316 printk("%s\n",buf); 336 printk("%s\n", buf);
317 else 337 else
318 printk("%s...\n",buf); 338 printk("%s...\n", buf);
319#endif /* DUMP_PACKETS > 0 */ 339#endif /* DUMP_PACKETS > 0 */
320 340
321 /* Minimum ethernet-frame size */ 341 /* Minimum ethernet-frame size */
322#ifdef CONFIG_TR 342#ifdef CONFIG_TR
323 if (priv->is_trdev) 343 if (priv->is_trdev)
324 min_frame_size = LEC_MINIMUM_8025_SIZE; 344 min_frame_size = LEC_MINIMUM_8025_SIZE;
325 else 345 else
326#endif 346#endif
327 min_frame_size = LEC_MINIMUM_8023_SIZE; 347 min_frame_size = LEC_MINIMUM_8023_SIZE;
328 if (skb->len < min_frame_size) { 348 if (skb->len < min_frame_size) {
329 if ((skb->len + skb_tailroom(skb)) < min_frame_size) { 349 if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
330 skb2 = skb_copy_expand(skb, 0, 350 skb2 = skb_copy_expand(skb, 0,
331 min_frame_size - skb->truesize, GFP_ATOMIC); 351 min_frame_size - skb->truesize,
332 dev_kfree_skb(skb); 352 GFP_ATOMIC);
333 if (skb2 == NULL) { 353 dev_kfree_skb(skb);
334 priv->stats.tx_dropped++; 354 if (skb2 == NULL) {
335 return 0; 355 priv->stats.tx_dropped++;
336 } 356 return 0;
337 skb = skb2; 357 }
338 } 358 skb = skb2;
359 }
339 skb_put(skb, min_frame_size - skb->len); 360 skb_put(skb, min_frame_size - skb->len);
340 } 361 }
341 362
342 /* Send to right vcc */ 363 /* Send to right vcc */
343 is_rdesc = 0; 364 is_rdesc = 0;
344 dst = lec_h->h_dest; 365 dst = lec_h->h_dest;
345#ifdef CONFIG_TR 366#ifdef CONFIG_TR
346 if (priv->is_trdev) { 367 if (priv->is_trdev) {
347 dst = get_tr_dst(skb->data+2, rdesc); 368 dst = get_tr_dst(skb->data + 2, rdesc);
348 if (dst == NULL) { 369 if (dst == NULL) {
349 dst = rdesc; 370 dst = rdesc;
350 is_rdesc = 1; 371 is_rdesc = 1;
351 } 372 }
352 } 373 }
353#endif 374#endif
354 entry = NULL; 375 entry = NULL;
355 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 376 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
356 DPRINTK("%s:vcc:%p vcc_flags:%x, entry:%p\n", dev->name, 377 DPRINTK("%s:vcc:%p vcc_flags:%x, entry:%p\n", dev->name,
357 vcc, vcc?vcc->flags:0, entry); 378 vcc, vcc ? vcc->flags : 0, entry);
358 if (!vcc || !test_bit(ATM_VF_READY,&vcc->flags)) { 379 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
359 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { 380 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
360 DPRINTK("%s:lec_start_xmit: queuing packet, ", dev->name); 381 DPRINTK("%s:lec_start_xmit: queuing packet, ",
361 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", 382 dev->name);
362 lec_h->h_dest[0], lec_h->h_dest[1], lec_h->h_dest[2], 383 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n",
363 lec_h->h_dest[3], lec_h->h_dest[4], lec_h->h_dest[5]); 384 lec_h->h_dest[0], lec_h->h_dest[1],
364 skb_queue_tail(&entry->tx_wait, skb); 385 lec_h->h_dest[2], lec_h->h_dest[3],
365 } else { 386 lec_h->h_dest[4], lec_h->h_dest[5]);
366 DPRINTK("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", dev->name); 387 skb_queue_tail(&entry->tx_wait, skb);
367 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", 388 } else {
368 lec_h->h_dest[0], lec_h->h_dest[1], lec_h->h_dest[2], 389 DPRINTK
369 lec_h->h_dest[3], lec_h->h_dest[4], lec_h->h_dest[5]); 390 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ",
370 priv->stats.tx_dropped++; 391 dev->name);
371 dev_kfree_skb(skb); 392 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n",
372 } 393 lec_h->h_dest[0], lec_h->h_dest[1],
373 return 0; 394 lec_h->h_dest[2], lec_h->h_dest[3],
374 } 395 lec_h->h_dest[4], lec_h->h_dest[5]);
375 396 priv->stats.tx_dropped++;
376#if DUMP_PACKETS > 0 397 dev_kfree_skb(skb);
377 printk("%s:sending to vpi:%d vci:%d\n", dev->name, 398 }
378 vcc->vpi, vcc->vci); 399 goto out;
400 }
401#if DUMP_PACKETS > 0
402 printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci);
379#endif /* DUMP_PACKETS > 0 */ 403#endif /* DUMP_PACKETS > 0 */
380 404
381 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 405 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
382 DPRINTK("lec.c: emptying tx queue, "); 406 DPRINTK("lec.c: emptying tx queue, ");
383 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", 407 DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n",
384 lec_h->h_dest[0], lec_h->h_dest[1], lec_h->h_dest[2], 408 lec_h->h_dest[0], lec_h->h_dest[1], lec_h->h_dest[2],
385 lec_h->h_dest[3], lec_h->h_dest[4], lec_h->h_dest[5]); 409 lec_h->h_dest[3], lec_h->h_dest[4], lec_h->h_dest[5]);
386 lec_send(vcc, skb2, priv); 410 lec_send(vcc, skb2, priv);
387 } 411 }
388 412
389 lec_send(vcc, skb, priv); 413 lec_send(vcc, skb, priv);
390 414
@@ -404,210 +428,219 @@ lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
404 netif_wake_queue(dev); 428 netif_wake_queue(dev);
405 } 429 }
406 430
431out:
432 if (entry)
433 lec_arp_put(entry);
407 dev->trans_start = jiffies; 434 dev->trans_start = jiffies;
408 return 0; 435 return 0;
409} 436}
410 437
411/* The inverse routine to net_open(). */ 438/* The inverse routine to net_open(). */
412static int 439static int lec_close(struct net_device *dev)
413lec_close(struct net_device *dev)
414{ 440{
415 netif_stop_queue(dev); 441 netif_stop_queue(dev);
416 return 0; 442 return 0;
417} 443}
418 444
419/* 445/*
420 * Get the current statistics. 446 * Get the current statistics.
421 * This may be called with the card open or closed. 447 * This may be called with the card open or closed.
422 */ 448 */
423static struct net_device_stats * 449static struct net_device_stats *lec_get_stats(struct net_device *dev)
424lec_get_stats(struct net_device *dev)
425{ 450{
426 return &((struct lec_priv *)dev->priv)->stats; 451 return &((struct lec_priv *)dev->priv)->stats;
427} 452}
428 453
429static int 454static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
430lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
431{ 455{
432 unsigned long flags; 456 unsigned long flags;
433 struct net_device *dev = (struct net_device*)vcc->proto_data; 457 struct net_device *dev = (struct net_device *)vcc->proto_data;
434 struct lec_priv *priv = (struct lec_priv*)dev->priv; 458 struct lec_priv *priv = (struct lec_priv *)dev->priv;
435 struct atmlec_msg *mesg; 459 struct atmlec_msg *mesg;
436 struct lec_arp_table *entry; 460 struct lec_arp_table *entry;
437 int i; 461 int i;
438 char *tmp; /* FIXME */ 462 char *tmp; /* FIXME */
439 463
440 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 464 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
441 mesg = (struct atmlec_msg *)skb->data; 465 mesg = (struct atmlec_msg *)skb->data;
442 tmp = skb->data; 466 tmp = skb->data;
443 tmp += sizeof(struct atmlec_msg); 467 tmp += sizeof(struct atmlec_msg);
444 DPRINTK("%s: msg from zeppelin:%d\n", dev->name, mesg->type); 468 DPRINTK("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
445 switch(mesg->type) { 469 switch (mesg->type) {
446 case l_set_mac_addr: 470 case l_set_mac_addr:
447 for (i=0;i<6;i++) { 471 for (i = 0; i < 6; i++) {
448 dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; 472 dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
449 } 473 }
450 break; 474 break;
451 case l_del_mac_addr: 475 case l_del_mac_addr:
452 for(i=0;i<6;i++) { 476 for (i = 0; i < 6; i++) {
453 dev->dev_addr[i] = 0; 477 dev->dev_addr[i] = 0;
454 } 478 }
455 break; 479 break;
456 case l_addr_delete: 480 case l_addr_delete:
457 lec_addr_delete(priv, mesg->content.normal.atm_addr, 481 lec_addr_delete(priv, mesg->content.normal.atm_addr,
458 mesg->content.normal.flag); 482 mesg->content.normal.flag);
459 break; 483 break;
460 case l_topology_change: 484 case l_topology_change:
461 priv->topology_change = mesg->content.normal.flag; 485 priv->topology_change = mesg->content.normal.flag;
462 break; 486 break;
463 case l_flush_complete: 487 case l_flush_complete:
464 lec_flush_complete(priv, mesg->content.normal.flag); 488 lec_flush_complete(priv, mesg->content.normal.flag);
465 break; 489 break;
466 case l_narp_req: /* LANE2: see 7.1.35 in the lane2 spec */ 490 case l_narp_req: /* LANE2: see 7.1.35 in the lane2 spec */
467 spin_lock_irqsave(&priv->lec_arp_lock, flags); 491 spin_lock_irqsave(&priv->lec_arp_lock, flags);
468 entry = lec_arp_find(priv, mesg->content.normal.mac_addr); 492 entry = lec_arp_find(priv, mesg->content.normal.mac_addr);
469 lec_arp_remove(priv, entry); 493 lec_arp_remove(priv, entry);
470 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 494 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
471 495
472 if (mesg->content.normal.no_source_le_narp) 496 if (mesg->content.normal.no_source_le_narp)
473 break; 497 break;
474 /* FALL THROUGH */ 498 /* FALL THROUGH */
475 case l_arp_update: 499 case l_arp_update:
476 lec_arp_update(priv, mesg->content.normal.mac_addr, 500 lec_arp_update(priv, mesg->content.normal.mac_addr,
477 mesg->content.normal.atm_addr, 501 mesg->content.normal.atm_addr,
478 mesg->content.normal.flag, 502 mesg->content.normal.flag,
479 mesg->content.normal.targetless_le_arp); 503 mesg->content.normal.targetless_le_arp);
480 DPRINTK("lec: in l_arp_update\n"); 504 DPRINTK("lec: in l_arp_update\n");
481 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ 505 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
482 DPRINTK("lec: LANE2 3.1.5, got tlvs, size %d\n", mesg->sizeoftlvs); 506 DPRINTK("lec: LANE2 3.1.5, got tlvs, size %d\n",
483 lane2_associate_ind(dev, 507 mesg->sizeoftlvs);
484 mesg->content.normal.mac_addr, 508 lane2_associate_ind(dev, mesg->content.normal.mac_addr,
485 tmp, mesg->sizeoftlvs); 509 tmp, mesg->sizeoftlvs);
486 } 510 }
487 break; 511 break;
488 case l_config: 512 case l_config:
489 priv->maximum_unknown_frame_count = 513 priv->maximum_unknown_frame_count =
490 mesg->content.config.maximum_unknown_frame_count; 514 mesg->content.config.maximum_unknown_frame_count;
491 priv->max_unknown_frame_time = 515 priv->max_unknown_frame_time =
492 (mesg->content.config.max_unknown_frame_time*HZ); 516 (mesg->content.config.max_unknown_frame_time * HZ);
493 priv->max_retry_count = 517 priv->max_retry_count = mesg->content.config.max_retry_count;
494 mesg->content.config.max_retry_count; 518 priv->aging_time = (mesg->content.config.aging_time * HZ);
495 priv->aging_time = (mesg->content.config.aging_time*HZ); 519 priv->forward_delay_time =
496 priv->forward_delay_time = 520 (mesg->content.config.forward_delay_time * HZ);
497 (mesg->content.config.forward_delay_time*HZ); 521 priv->arp_response_time =
498 priv->arp_response_time = 522 (mesg->content.config.arp_response_time * HZ);
499 (mesg->content.config.arp_response_time*HZ); 523 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
500 priv->flush_timeout = (mesg->content.config.flush_timeout*HZ); 524 priv->path_switching_delay =
501 priv->path_switching_delay = 525 (mesg->content.config.path_switching_delay * HZ);
502 (mesg->content.config.path_switching_delay*HZ); 526 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */
503 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */
504 priv->lane2_ops = NULL; 527 priv->lane2_ops = NULL;
505 if (priv->lane_version > 1) 528 if (priv->lane_version > 1)
506 priv->lane2_ops = &lane2_ops; 529 priv->lane2_ops = &lane2_ops;
507 if (dev->change_mtu(dev, mesg->content.config.mtu)) 530 if (dev->change_mtu(dev, mesg->content.config.mtu))
508 printk("%s: change_mtu to %d failed\n", dev->name, 531 printk("%s: change_mtu to %d failed\n", dev->name,
509 mesg->content.config.mtu); 532 mesg->content.config.mtu);
510 priv->is_proxy = mesg->content.config.is_proxy; 533 priv->is_proxy = mesg->content.config.is_proxy;
511 break; 534 break;
512 case l_flush_tran_id: 535 case l_flush_tran_id:
513 lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr, 536 lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr,
514 mesg->content.normal.flag); 537 mesg->content.normal.flag);
515 break; 538 break;
516 case l_set_lecid: 539 case l_set_lecid:
517 priv->lecid=(unsigned short)(0xffff&mesg->content.normal.flag); 540 priv->lecid =
518 break; 541 (unsigned short)(0xffff & mesg->content.normal.flag);
519 case l_should_bridge: { 542 break;
543 case l_should_bridge:
520#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 544#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
521 struct net_bridge_fdb_entry *f; 545 {
522 546 struct net_bridge_fdb_entry *f;
523 DPRINTK("%s: bridge zeppelin asks about 0x%02x:%02x:%02x:%02x:%02x:%02x\n", 547
524 dev->name, 548 DPRINTK
525 mesg->content.proxy.mac_addr[0], mesg->content.proxy.mac_addr[1], 549 ("%s: bridge zeppelin asks about 0x%02x:%02x:%02x:%02x:%02x:%02x\n",
526 mesg->content.proxy.mac_addr[2], mesg->content.proxy.mac_addr[3], 550 dev->name, mesg->content.proxy.mac_addr[0],
527 mesg->content.proxy.mac_addr[4], mesg->content.proxy.mac_addr[5]); 551 mesg->content.proxy.mac_addr[1],
528 552 mesg->content.proxy.mac_addr[2],
529 if (br_fdb_get_hook == NULL || dev->br_port == NULL) 553 mesg->content.proxy.mac_addr[3],
530 break; 554 mesg->content.proxy.mac_addr[4],
531 555 mesg->content.proxy.mac_addr[5]);
532 f = br_fdb_get_hook(dev->br_port->br, mesg->content.proxy.mac_addr); 556
533 if (f != NULL && 557 if (br_fdb_get_hook == NULL || dev->br_port == NULL)
534 f->dst->dev != dev && 558 break;
535 f->dst->state == BR_STATE_FORWARDING) { 559
536 /* hit from bridge table, send LE_ARP_RESPONSE */ 560 f = br_fdb_get_hook(dev->br_port->br,
537 struct sk_buff *skb2; 561 mesg->content.proxy.mac_addr);
538 struct sock *sk; 562 if (f != NULL && f->dst->dev != dev
539 563 && f->dst->state == BR_STATE_FORWARDING) {
540 DPRINTK("%s: entry found, responding to zeppelin\n", dev->name); 564 /* hit from bridge table, send LE_ARP_RESPONSE */
541 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 565 struct sk_buff *skb2;
542 if (skb2 == NULL) { 566 struct sock *sk;
543 br_fdb_put_hook(f); 567
544 break; 568 DPRINTK
545 } 569 ("%s: entry found, responding to zeppelin\n",
546 skb2->len = sizeof(struct atmlec_msg); 570 dev->name);
547 memcpy(skb2->data, mesg, sizeof(struct atmlec_msg)); 571 skb2 =
548 atm_force_charge(priv->lecd, skb2->truesize); 572 alloc_skb(sizeof(struct atmlec_msg),
549 sk = sk_atm(priv->lecd); 573 GFP_ATOMIC);
550 skb_queue_tail(&sk->sk_receive_queue, skb2); 574 if (skb2 == NULL) {
551 sk->sk_data_ready(sk, skb2->len); 575 br_fdb_put_hook(f);
552 } 576 break;
553 if (f != NULL) br_fdb_put_hook(f); 577 }
578 skb2->len = sizeof(struct atmlec_msg);
579 memcpy(skb2->data, mesg,
580 sizeof(struct atmlec_msg));
581 atm_force_charge(priv->lecd, skb2->truesize);
582 sk = sk_atm(priv->lecd);
583 skb_queue_tail(&sk->sk_receive_queue, skb2);
584 sk->sk_data_ready(sk, skb2->len);
585 }
586 if (f != NULL)
587 br_fdb_put_hook(f);
588 }
554#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 589#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
555 } 590 break;
556 break; 591 default:
557 default: 592 printk("%s: Unknown message type %d\n", dev->name, mesg->type);
558 printk("%s: Unknown message type %d\n", dev->name, mesg->type); 593 dev_kfree_skb(skb);
559 dev_kfree_skb(skb); 594 return -EINVAL;
560 return -EINVAL; 595 }
561 } 596 dev_kfree_skb(skb);
562 dev_kfree_skb(skb); 597 return 0;
563 return 0;
564} 598}
565 599
566static void 600static void lec_atm_close(struct atm_vcc *vcc)
567lec_atm_close(struct atm_vcc *vcc)
568{ 601{
569 struct sk_buff *skb; 602 struct sk_buff *skb;
570 struct net_device *dev = (struct net_device *)vcc->proto_data; 603 struct net_device *dev = (struct net_device *)vcc->proto_data;
571 struct lec_priv *priv = (struct lec_priv *)dev->priv; 604 struct lec_priv *priv = (struct lec_priv *)dev->priv;
572 605
573 priv->lecd = NULL; 606 priv->lecd = NULL;
574 /* Do something needful? */ 607 /* Do something needful? */
575 608
576 netif_stop_queue(dev); 609 netif_stop_queue(dev);
577 lec_arp_destroy(priv); 610 lec_arp_destroy(priv);
578 611
579 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 612 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
580 printk("%s lec_atm_close: closing with messages pending\n", 613 printk("%s lec_atm_close: closing with messages pending\n",
581 dev->name); 614 dev->name);
582 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) { 615 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
583 atm_return(vcc, skb->truesize); 616 atm_return(vcc, skb->truesize);
584 dev_kfree_skb(skb); 617 dev_kfree_skb(skb);
585 } 618 }
586 619
587 printk("%s: Shut down!\n", dev->name); 620 printk("%s: Shut down!\n", dev->name);
588 module_put(THIS_MODULE); 621 module_put(THIS_MODULE);
589} 622}
590 623
591static struct atmdev_ops lecdev_ops = { 624static struct atmdev_ops lecdev_ops = {
592 .close = lec_atm_close, 625 .close = lec_atm_close,
593 .send = lec_atm_send 626 .send = lec_atm_send
594}; 627};
595 628
596static struct atm_dev lecatm_dev = { 629static struct atm_dev lecatm_dev = {
597 .ops = &lecdev_ops, 630 .ops = &lecdev_ops,
598 .type = "lec", 631 .type = "lec",
599 .number = 999, /* dummy device number */ 632 .number = 999, /* dummy device number */
600 .lock = SPIN_LOCK_UNLOCKED 633 .lock = SPIN_LOCK_UNLOCKED
601}; 634};
602 635
603/* 636/*
604 * LANE2: new argument struct sk_buff *data contains 637 * LANE2: new argument struct sk_buff *data contains
605 * the LE_ARP based TLVs introduced in the LANE2 spec 638 * the LE_ARP based TLVs introduced in the LANE2 spec
606 */ 639 */
607static int 640static int
608send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, 641send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
609 unsigned char *mac_addr, unsigned char *atm_addr, 642 unsigned char *mac_addr, unsigned char *atm_addr,
610 struct sk_buff *data) 643 struct sk_buff *data)
611{ 644{
612 struct sock *sk; 645 struct sock *sk;
613 struct sk_buff *skb; 646 struct sk_buff *skb;
@@ -621,187 +654,193 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
621 return -1; 654 return -1;
622 skb->len = sizeof(struct atmlec_msg); 655 skb->len = sizeof(struct atmlec_msg);
623 mesg = (struct atmlec_msg *)skb->data; 656 mesg = (struct atmlec_msg *)skb->data;
624 memset(mesg, 0, sizeof(struct atmlec_msg)); 657 memset(mesg, 0, sizeof(struct atmlec_msg));
625 mesg->type = type; 658 mesg->type = type;
626 if (data != NULL) 659 if (data != NULL)
627 mesg->sizeoftlvs = data->len; 660 mesg->sizeoftlvs = data->len;
628 if (mac_addr) 661 if (mac_addr)
629 memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN); 662 memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN);
630 else 663 else
631 mesg->content.normal.targetless_le_arp = 1; 664 mesg->content.normal.targetless_le_arp = 1;
632 if (atm_addr) 665 if (atm_addr)
633 memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN); 666 memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);
634 667
635 atm_force_charge(priv->lecd, skb->truesize); 668 atm_force_charge(priv->lecd, skb->truesize);
636 sk = sk_atm(priv->lecd); 669 sk = sk_atm(priv->lecd);
637 skb_queue_tail(&sk->sk_receive_queue, skb); 670 skb_queue_tail(&sk->sk_receive_queue, skb);
638 sk->sk_data_ready(sk, skb->len); 671 sk->sk_data_ready(sk, skb->len);
639 672
640 if (data != NULL) { 673 if (data != NULL) {
641 DPRINTK("lec: about to send %d bytes of data\n", data->len); 674 DPRINTK("lec: about to send %d bytes of data\n", data->len);
642 atm_force_charge(priv->lecd, data->truesize); 675 atm_force_charge(priv->lecd, data->truesize);
643 skb_queue_tail(&sk->sk_receive_queue, data); 676 skb_queue_tail(&sk->sk_receive_queue, data);
644 sk->sk_data_ready(sk, skb->len); 677 sk->sk_data_ready(sk, skb->len);
645 } 678 }
646 679
647 return 0; 680 return 0;
648} 681}
649 682
650/* shamelessly stolen from drivers/net/net_init.c */ 683/* shamelessly stolen from drivers/net/net_init.c */
651static int lec_change_mtu(struct net_device *dev, int new_mtu) 684static int lec_change_mtu(struct net_device *dev, int new_mtu)
652{ 685{
653 if ((new_mtu < 68) || (new_mtu > 18190)) 686 if ((new_mtu < 68) || (new_mtu > 18190))
654 return -EINVAL; 687 return -EINVAL;
655 dev->mtu = new_mtu; 688 dev->mtu = new_mtu;
656 return 0; 689 return 0;
657} 690}
658 691
659static void lec_set_multicast_list(struct net_device *dev) 692static void lec_set_multicast_list(struct net_device *dev)
660{ 693{
661 /* by default, all multicast frames arrive over the bus. 694 /*
662 * eventually support selective multicast service 695 * by default, all multicast frames arrive over the bus.
663 */ 696 * eventually support selective multicast service
664 return; 697 */
698 return;
665} 699}
666 700
667static void 701static void lec_init(struct net_device *dev)
668lec_init(struct net_device *dev)
669{ 702{
670 dev->change_mtu = lec_change_mtu; 703 dev->change_mtu = lec_change_mtu;
671 dev->open = lec_open; 704 dev->open = lec_open;
672 dev->stop = lec_close; 705 dev->stop = lec_close;
673 dev->hard_start_xmit = lec_start_xmit; 706 dev->hard_start_xmit = lec_start_xmit;
674 dev->tx_timeout = lec_tx_timeout; 707 dev->tx_timeout = lec_tx_timeout;
675 708
676 dev->get_stats = lec_get_stats; 709 dev->get_stats = lec_get_stats;
677 dev->set_multicast_list = lec_set_multicast_list; 710 dev->set_multicast_list = lec_set_multicast_list;
678 dev->do_ioctl = NULL; 711 dev->do_ioctl = NULL;
679 printk("%s: Initialized!\n",dev->name); 712 printk("%s: Initialized!\n", dev->name);
680 return; 713 return;
681} 714}
682 715
683static unsigned char lec_ctrl_magic[] = { 716static unsigned char lec_ctrl_magic[] = {
684 0xff, 717 0xff,
685 0x00, 718 0x00,
686 0x01, 719 0x01,
687 0x01 }; 720 0x01
721};
688 722
689#define LEC_DATA_DIRECT_8023 2 723#define LEC_DATA_DIRECT_8023 2
690#define LEC_DATA_DIRECT_8025 3 724#define LEC_DATA_DIRECT_8025 3
691 725
692static int lec_is_data_direct(struct atm_vcc *vcc) 726static int lec_is_data_direct(struct atm_vcc *vcc)
693{ 727{
694 return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) || 728 return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
695 (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025)); 729 (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
696} 730}
697 731
698static void 732static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
699lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
700{ 733{
701 unsigned long flags; 734 unsigned long flags;
702 struct net_device *dev = (struct net_device *)vcc->proto_data; 735 struct net_device *dev = (struct net_device *)vcc->proto_data;
703 struct lec_priv *priv = (struct lec_priv *)dev->priv; 736 struct lec_priv *priv = (struct lec_priv *)dev->priv;
704 737
705#if DUMP_PACKETS >0 738#if DUMP_PACKETS >0
706 int i=0; 739 int i = 0;
707 char buf[300]; 740 char buf[300];
708 741
709 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name, 742 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
710 vcc->vpi, vcc->vci); 743 vcc->vpi, vcc->vci);
711#endif 744#endif
712 if (!skb) { 745 if (!skb) {
713 DPRINTK("%s: null skb\n",dev->name); 746 DPRINTK("%s: null skb\n", dev->name);
714 lec_vcc_close(priv, vcc); 747 lec_vcc_close(priv, vcc);
715 return; 748 return;
716 } 749 }
717#if DUMP_PACKETS > 0 750#if DUMP_PACKETS > 0
718 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name, 751 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
719 skb->len, priv->lecid); 752 skb->len, priv->lecid);
720#if DUMP_PACKETS >= 2 753#if DUMP_PACKETS >= 2
721 for(i=0;i<skb->len && i <99;i++) { 754 for (i = 0; i < skb->len && i < 99; i++) {
722 sprintf(buf+i*3,"%2.2x ",0xff&skb->data[i]); 755 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
723 } 756 }
724#elif DUMP_PACKETS >= 1 757#elif DUMP_PACKETS >= 1
725 for(i=0;i<skb->len && i < 30;i++) { 758 for (i = 0; i < skb->len && i < 30; i++) {
726 sprintf(buf+i*3,"%2.2x ", 0xff&skb->data[i]); 759 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
727 } 760 }
728#endif /* DUMP_PACKETS >= 1 */ 761#endif /* DUMP_PACKETS >= 1 */
729 if (i==skb->len) 762 if (i == skb->len)
730 printk("%s\n",buf); 763 printk("%s\n", buf);
731 else 764 else
732 printk("%s...\n",buf); 765 printk("%s...\n", buf);
733#endif /* DUMP_PACKETS > 0 */ 766#endif /* DUMP_PACKETS > 0 */
734 if (memcmp(skb->data, lec_ctrl_magic, 4) ==0) { /* Control frame, to daemon*/ 767 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */
735 struct sock *sk = sk_atm(vcc); 768 struct sock *sk = sk_atm(vcc);
736 769
737 DPRINTK("%s: To daemon\n",dev->name); 770 DPRINTK("%s: To daemon\n", dev->name);
738 skb_queue_tail(&sk->sk_receive_queue, skb); 771 skb_queue_tail(&sk->sk_receive_queue, skb);
739 sk->sk_data_ready(sk, skb->len); 772 sk->sk_data_ready(sk, skb->len);
740 } else { /* Data frame, queue to protocol handlers */ 773 } else { /* Data frame, queue to protocol handlers */
741 struct lec_arp_table *entry; 774 struct lec_arp_table *entry;
742 unsigned char *src, *dst; 775 unsigned char *src, *dst;
743 776
744 atm_return(vcc,skb->truesize); 777 atm_return(vcc, skb->truesize);
745 if (*(uint16_t *)skb->data == htons(priv->lecid) || 778 if (*(uint16_t *) skb->data == htons(priv->lecid) ||
746 !priv->lecd || 779 !priv->lecd || !(dev->flags & IFF_UP)) {
747 !(dev->flags & IFF_UP)) { 780 /*
748 /* Probably looping back, or if lecd is missing, 781 * Probably looping back, or if lecd is missing,
749 lecd has gone down */ 782 * lecd has gone down
750 DPRINTK("Ignoring frame...\n"); 783 */
751 dev_kfree_skb(skb); 784 DPRINTK("Ignoring frame...\n");
752 return; 785 dev_kfree_skb(skb);
753 } 786 return;
787 }
754#ifdef CONFIG_TR 788#ifdef CONFIG_TR
755 if (priv->is_trdev) 789 if (priv->is_trdev)
756 dst = ((struct lecdatahdr_8025 *) skb->data)->h_dest; 790 dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
757 else 791 else
758#endif 792#endif
759 dst = ((struct lecdatahdr_8023 *) skb->data)->h_dest; 793 dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
760 794
761 /* If this is a Data Direct VCC, and the VCC does not match 795 /*
796 * If this is a Data Direct VCC, and the VCC does not match
762 * the LE_ARP cache entry, delete the LE_ARP cache entry. 797 * the LE_ARP cache entry, delete the LE_ARP cache entry.
763 */ 798 */
764 spin_lock_irqsave(&priv->lec_arp_lock, flags); 799 spin_lock_irqsave(&priv->lec_arp_lock, flags);
765 if (lec_is_data_direct(vcc)) { 800 if (lec_is_data_direct(vcc)) {
766#ifdef CONFIG_TR 801#ifdef CONFIG_TR
767 if (priv->is_trdev) 802 if (priv->is_trdev)
768 src = ((struct lecdatahdr_8025 *) skb->data)->h_source; 803 src =
804 ((struct lecdatahdr_8025 *)skb->data)->
805 h_source;
769 else 806 else
770#endif 807#endif
771 src = ((struct lecdatahdr_8023 *) skb->data)->h_source; 808 src =
809 ((struct lecdatahdr_8023 *)skb->data)->
810 h_source;
772 entry = lec_arp_find(priv, src); 811 entry = lec_arp_find(priv, src);
773 if (entry && entry->vcc != vcc) { 812 if (entry && entry->vcc != vcc) {
774 lec_arp_remove(priv, entry); 813 lec_arp_remove(priv, entry);
775 kfree(entry); 814 lec_arp_put(entry);
776 } 815 }
777 } 816 }
778 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 817 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
779 818
780 if (!(dst[0]&0x01) && /* Never filter Multi/Broadcast */ 819 if (!(dst[0] & 0x01) && /* Never filter Multi/Broadcast */
781 !priv->is_proxy && /* Proxy wants all the packets */ 820 !priv->is_proxy && /* Proxy wants all the packets */
782 memcmp(dst, dev->dev_addr, dev->addr_len)) { 821 memcmp(dst, dev->dev_addr, dev->addr_len)) {
783 dev_kfree_skb(skb); 822 dev_kfree_skb(skb);
784 return; 823 return;
785 } 824 }
786 if (priv->lec_arp_empty_ones) { 825 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
787 lec_arp_check_empties(priv, vcc, skb); 826 lec_arp_check_empties(priv, vcc, skb);
788 } 827 }
789 skb->dev = dev; 828 skb->dev = dev;
790 skb_pull(skb, 2); /* skip lec_id */ 829 skb_pull(skb, 2); /* skip lec_id */
791#ifdef CONFIG_TR 830#ifdef CONFIG_TR
792 if (priv->is_trdev) skb->protocol = tr_type_trans(skb, dev); 831 if (priv->is_trdev)
793 else 832 skb->protocol = tr_type_trans(skb, dev);
833 else
794#endif 834#endif
795 skb->protocol = eth_type_trans(skb, dev); 835 skb->protocol = eth_type_trans(skb, dev);
796 priv->stats.rx_packets++; 836 priv->stats.rx_packets++;
797 priv->stats.rx_bytes += skb->len; 837 priv->stats.rx_bytes += skb->len;
798 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 838 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
799 netif_rx(skb); 839 netif_rx(skb);
800 } 840 }
801} 841}
802 842
803static void 843static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
804lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
805{ 844{
806 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); 845 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
807 struct net_device *dev = skb->dev; 846 struct net_device *dev = skb->dev;
@@ -820,123 +859,121 @@ lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
820 } 859 }
821} 860}
822 861
823static int 862static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
824lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
825{ 863{
826 struct lec_vcc_priv *vpriv; 864 struct lec_vcc_priv *vpriv;
827 int bytes_left; 865 int bytes_left;
828 struct atmlec_ioc ioc_data; 866 struct atmlec_ioc ioc_data;
829 867
830 /* Lecd must be up in this case */ 868 /* Lecd must be up in this case */
831 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 869 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
832 if (bytes_left != 0) { 870 if (bytes_left != 0) {
833 printk("lec: lec_vcc_attach, copy from user failed for %d bytes\n", 871 printk
834 bytes_left); 872 ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
835 } 873 bytes_left);
836 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 874 }
837 !dev_lec[ioc_data.dev_num]) 875 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
838 return -EINVAL; 876 !dev_lec[ioc_data.dev_num])
877 return -EINVAL;
839 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 878 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL)))
840 return -ENOMEM; 879 return -ENOMEM;
841 vpriv->xoff = 0; 880 vpriv->xoff = 0;
842 vpriv->old_pop = vcc->pop; 881 vpriv->old_pop = vcc->pop;
843 vcc->user_back = vpriv; 882 vcc->user_back = vpriv;
844 vcc->pop = lec_pop; 883 vcc->pop = lec_pop;
845 lec_vcc_added(dev_lec[ioc_data.dev_num]->priv, 884 lec_vcc_added(dev_lec[ioc_data.dev_num]->priv,
846 &ioc_data, vcc, vcc->push); 885 &ioc_data, vcc, vcc->push);
847 vcc->proto_data = dev_lec[ioc_data.dev_num]; 886 vcc->proto_data = dev_lec[ioc_data.dev_num];
848 vcc->push = lec_push; 887 vcc->push = lec_push;
849 return 0; 888 return 0;
850} 889}
851 890
852static int 891static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
853lec_mcast_attach(struct atm_vcc *vcc, int arg)
854{ 892{
855 if (arg <0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) 893 if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
856 return -EINVAL; 894 return -EINVAL;
857 vcc->proto_data = dev_lec[arg]; 895 vcc->proto_data = dev_lec[arg];
858 return (lec_mcast_make((struct lec_priv*)dev_lec[arg]->priv, vcc)); 896 return (lec_mcast_make((struct lec_priv *)dev_lec[arg]->priv, vcc));
859} 897}
860 898
861/* Initialize device. */ 899/* Initialize device. */
862static int 900static int lecd_attach(struct atm_vcc *vcc, int arg)
863lecd_attach(struct atm_vcc *vcc, int arg) 901{
864{ 902 int i;
865 int i; 903 struct lec_priv *priv;
866 struct lec_priv *priv; 904
867 905 if (arg < 0)
868 if (arg<0) 906 i = 0;
869 i = 0; 907 else
870 else 908 i = arg;
871 i = arg;
872#ifdef CONFIG_TR 909#ifdef CONFIG_TR
873 if (arg >= MAX_LEC_ITF) 910 if (arg >= MAX_LEC_ITF)
874 return -EINVAL; 911 return -EINVAL;
875#else /* Reserve the top NUM_TR_DEVS for TR */ 912#else /* Reserve the top NUM_TR_DEVS for TR */
876 if (arg >= (MAX_LEC_ITF-NUM_TR_DEVS)) 913 if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
877 return -EINVAL; 914 return -EINVAL;
878#endif 915#endif
879 if (!dev_lec[i]) { 916 if (!dev_lec[i]) {
880 int is_trdev, size; 917 int is_trdev, size;
881 918
882 is_trdev = 0; 919 is_trdev = 0;
883 if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) 920 if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
884 is_trdev = 1; 921 is_trdev = 1;
885 922
886 size = sizeof(struct lec_priv); 923 size = sizeof(struct lec_priv);
887#ifdef CONFIG_TR 924#ifdef CONFIG_TR
888 if (is_trdev) 925 if (is_trdev)
889 dev_lec[i] = alloc_trdev(size); 926 dev_lec[i] = alloc_trdev(size);
890 else 927 else
891#endif 928#endif
892 dev_lec[i] = alloc_etherdev(size); 929 dev_lec[i] = alloc_etherdev(size);
893 if (!dev_lec[i]) 930 if (!dev_lec[i])
894 return -ENOMEM; 931 return -ENOMEM;
895 snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); 932 snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
896 if (register_netdev(dev_lec[i])) { 933 if (register_netdev(dev_lec[i])) {
897 free_netdev(dev_lec[i]); 934 free_netdev(dev_lec[i]);
898 return -EINVAL; 935 return -EINVAL;
899 } 936 }
900 937
901 priv = dev_lec[i]->priv; 938 priv = dev_lec[i]->priv;
902 priv->is_trdev = is_trdev; 939 priv->is_trdev = is_trdev;
903 lec_init(dev_lec[i]); 940 lec_init(dev_lec[i]);
904 } else { 941 } else {
905 priv = dev_lec[i]->priv; 942 priv = dev_lec[i]->priv;
906 if (priv->lecd) 943 if (priv->lecd)
907 return -EADDRINUSE; 944 return -EADDRINUSE;
908 } 945 }
909 lec_arp_init(priv); 946 lec_arp_init(priv);
910 priv->itfnum = i; /* LANE2 addition */ 947 priv->itfnum = i; /* LANE2 addition */
911 priv->lecd = vcc; 948 priv->lecd = vcc;
912 vcc->dev = &lecatm_dev; 949 vcc->dev = &lecatm_dev;
913 vcc_insert_socket(sk_atm(vcc)); 950 vcc_insert_socket(sk_atm(vcc));
914 951
915 vcc->proto_data = dev_lec[i]; 952 vcc->proto_data = dev_lec[i];
916 set_bit(ATM_VF_META,&vcc->flags); 953 set_bit(ATM_VF_META, &vcc->flags);
917 set_bit(ATM_VF_READY,&vcc->flags); 954 set_bit(ATM_VF_READY, &vcc->flags);
918 955
919 /* Set default values to these variables */ 956 /* Set default values to these variables */
920 priv->maximum_unknown_frame_count = 1; 957 priv->maximum_unknown_frame_count = 1;
921 priv->max_unknown_frame_time = (1*HZ); 958 priv->max_unknown_frame_time = (1 * HZ);
922 priv->vcc_timeout_period = (1200*HZ); 959 priv->vcc_timeout_period = (1200 * HZ);
923 priv->max_retry_count = 1; 960 priv->max_retry_count = 1;
924 priv->aging_time = (300*HZ); 961 priv->aging_time = (300 * HZ);
925 priv->forward_delay_time = (15*HZ); 962 priv->forward_delay_time = (15 * HZ);
926 priv->topology_change = 0; 963 priv->topology_change = 0;
927 priv->arp_response_time = (1*HZ); 964 priv->arp_response_time = (1 * HZ);
928 priv->flush_timeout = (4*HZ); 965 priv->flush_timeout = (4 * HZ);
929 priv->path_switching_delay = (6*HZ); 966 priv->path_switching_delay = (6 * HZ);
930 967
931 if (dev_lec[i]->flags & IFF_UP) { 968 if (dev_lec[i]->flags & IFF_UP) {
932 netif_start_queue(dev_lec[i]); 969 netif_start_queue(dev_lec[i]);
933 } 970 }
934 __module_get(THIS_MODULE); 971 __module_get(THIS_MODULE);
935 return i; 972 return i;
936} 973}
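For reference, lecd_attach() above follows the usual alloc_etherdev()/register_netdev() life cycle for a virtual interface. A minimal sketch of that pattern with a hypothetical driver name and private struct (illustrative only, not taken from this patch):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_priv {
        int itfnum;                     /* per-device private data, sized below */
};

static struct net_device *my_create_dev(int i)
{
        struct net_device *dev;

        dev = alloc_etherdev(sizeof(struct my_priv));   /* allocates dev plus priv area */
        if (!dev)
                return NULL;
        snprintf(dev->name, IFNAMSIZ, "myeth%d", i);
        if (register_netdev(dev)) {     /* device becomes visible to the stack here */
                free_netdev(dev);       /* must use free_netdev(), never plain kfree() */
                return NULL;
        }
        return dev;
}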
937 974
938#ifdef CONFIG_PROC_FS 975#ifdef CONFIG_PROC_FS
939static char* lec_arp_get_status_string(unsigned char status) 976static char *lec_arp_get_status_string(unsigned char status)
940{ 977{
941 static char *lec_arp_status_string[] = { 978 static char *lec_arp_status_string[] = {
942 "ESI_UNKNOWN ", 979 "ESI_UNKNOWN ",
@@ -966,52 +1003,54 @@ static void lec_info(struct seq_file *seq, struct lec_arp_table *entry)
966 if (entry->vcc) 1003 if (entry->vcc)
967 seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci); 1004 seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci);
968 else 1005 else
969 seq_printf(seq, " "); 1006 seq_printf(seq, " ");
970 if (entry->recv_vcc) { 1007 if (entry->recv_vcc) {
971 seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi, 1008 seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi,
972 entry->recv_vcc->vci); 1009 entry->recv_vcc->vci);
973 } 1010 }
974 seq_putc(seq, '\n'); 1011 seq_putc(seq, '\n');
975} 1012}
976 1013
977
978struct lec_state { 1014struct lec_state {
979 unsigned long flags; 1015 unsigned long flags;
980 struct lec_priv *locked; 1016 struct lec_priv *locked;
981 struct lec_arp_table *entry; 1017 struct hlist_node *node;
982 struct net_device *dev; 1018 struct net_device *dev;
983 int itf; 1019 int itf;
984 int arp_table; 1020 int arp_table;
985 int misc_table; 1021 int misc_table;
986}; 1022};
987 1023
988static void *lec_tbl_walk(struct lec_state *state, struct lec_arp_table *tbl, 1024static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
989 loff_t *l) 1025 loff_t *l)
990{ 1026{
991 struct lec_arp_table *e = state->entry; 1027 struct hlist_node *e = state->node;
1028 struct lec_arp_table *tmp;
992 1029
993 if (!e) 1030 if (!e)
994 e = tbl; 1031 e = tbl->first;
995 if (e == (void *)1) { 1032 if (e == (void *)1) {
996 e = tbl; 1033 e = tbl->first;
997 --*l; 1034 --*l;
998 } 1035 }
999 for (; e; e = e->next) { 1036
1037 hlist_for_each_entry_from(tmp, e, next) {
1000 if (--*l < 0) 1038 if (--*l < 0)
1001 break; 1039 break;
1002 } 1040 }
1003 state->entry = e; 1041 state->node = e;
1042
1004 return (*l < 0) ? state : NULL; 1043 return (*l < 0) ? state : NULL;
1005} 1044}
1006 1045
1007static void *lec_arp_walk(struct lec_state *state, loff_t *l, 1046static void *lec_arp_walk(struct lec_state *state, loff_t *l,
1008 struct lec_priv *priv) 1047 struct lec_priv *priv)
1009{ 1048{
1010 void *v = NULL; 1049 void *v = NULL;
1011 int p; 1050 int p;
1012 1051
1013 for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) { 1052 for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) {
1014 v = lec_tbl_walk(state, priv->lec_arp_tables[p], l); 1053 v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l);
1015 if (v) 1054 if (v)
1016 break; 1055 break;
1017 } 1056 }
@@ -1022,10 +1061,10 @@ static void *lec_arp_walk(struct lec_state *state, loff_t *l,
1022static void *lec_misc_walk(struct lec_state *state, loff_t *l, 1061static void *lec_misc_walk(struct lec_state *state, loff_t *l,
1023 struct lec_priv *priv) 1062 struct lec_priv *priv)
1024{ 1063{
1025 struct lec_arp_table *lec_misc_tables[] = { 1064 struct hlist_head *lec_misc_tables[] = {
1026 priv->lec_arp_empty_ones, 1065 &priv->lec_arp_empty_ones,
1027 priv->lec_no_forward, 1066 &priv->lec_no_forward,
1028 priv->mcast_fwds 1067 &priv->mcast_fwds
1029 }; 1068 };
1030 void *v = NULL; 1069 void *v = NULL;
1031 int q; 1070 int q;
@@ -1046,8 +1085,7 @@ static void *lec_priv_walk(struct lec_state *state, loff_t *l,
1046 state->locked = priv; 1085 state->locked = priv;
1047 spin_lock_irqsave(&priv->lec_arp_lock, state->flags); 1086 spin_lock_irqsave(&priv->lec_arp_lock, state->flags);
1048 } 1087 }
1049 if (!lec_arp_walk(state, l, priv) && 1088 if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) {
1050 !lec_misc_walk(state, l, priv)) {
1051 spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags); 1089 spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags);
1052 state->locked = NULL; 1090 state->locked = NULL;
1053 /* Partial state reset for the next time we get called */ 1091 /* Partial state reset for the next time we get called */
@@ -1081,7 +1119,7 @@ static void *lec_get_idx(struct lec_state *state, loff_t l)
1081 if (v) 1119 if (v)
1082 break; 1120 break;
1083 } 1121 }
1084 return v; 1122 return v;
1085} 1123}
1086 1124
1087static void *lec_seq_start(struct seq_file *seq, loff_t *pos) 1125static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1093,9 +1131,9 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
1093 state->locked = NULL; 1131 state->locked = NULL;
1094 state->arp_table = 0; 1132 state->arp_table = 0;
1095 state->misc_table = 0; 1133 state->misc_table = 0;
1096 state->entry = (void *)1; 1134 state->node = (void *)1;
1097 1135
1098 return *pos ? lec_get_idx(state, *pos) : (void*)1; 1136 return *pos ? lec_get_idx(state, *pos) : (void *)1;
1099} 1137}
1100 1138
1101static void lec_seq_stop(struct seq_file *seq, void *v) 1139static void lec_seq_stop(struct seq_file *seq, void *v)
@@ -1120,27 +1158,28 @@ static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1120 1158
1121static int lec_seq_show(struct seq_file *seq, void *v) 1159static int lec_seq_show(struct seq_file *seq, void *v)
1122{ 1160{
1123 static char lec_banner[] = "Itf MAC ATM destination" 1161 static char lec_banner[] = "Itf MAC ATM destination"
1124 " Status Flags " 1162 " Status Flags "
1125 "VPI/VCI Recv VPI/VCI\n"; 1163 "VPI/VCI Recv VPI/VCI\n";
1126 1164
1127 if (v == (void *)1) 1165 if (v == (void *)1)
1128 seq_puts(seq, lec_banner); 1166 seq_puts(seq, lec_banner);
1129 else { 1167 else {
1130 struct lec_state *state = seq->private; 1168 struct lec_state *state = seq->private;
1131 struct net_device *dev = state->dev; 1169 struct net_device *dev = state->dev;
1170 struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next);
1132 1171
1133 seq_printf(seq, "%s ", dev->name); 1172 seq_printf(seq, "%s ", dev->name);
1134 lec_info(seq, state->entry); 1173 lec_info(seq, entry);
1135 } 1174 }
1136 return 0; 1175 return 0;
1137} 1176}
1138 1177
1139static struct seq_operations lec_seq_ops = { 1178static struct seq_operations lec_seq_ops = {
1140 .start = lec_seq_start, 1179 .start = lec_seq_start,
1141 .next = lec_seq_next, 1180 .next = lec_seq_next,
1142 .stop = lec_seq_stop, 1181 .stop = lec_seq_stop,
1143 .show = lec_seq_show, 1182 .show = lec_seq_show,
1144}; 1183};
1145 1184
1146static int lec_seq_open(struct inode *inode, struct file *file) 1185static int lec_seq_open(struct inode *inode, struct file *file)
@@ -1174,11 +1213,11 @@ static int lec_seq_release(struct inode *inode, struct file *file)
1174} 1213}
1175 1214
1176static struct file_operations lec_seq_fops = { 1215static struct file_operations lec_seq_fops = {
1177 .owner = THIS_MODULE, 1216 .owner = THIS_MODULE,
1178 .open = lec_seq_open, 1217 .open = lec_seq_open,
1179 .read = seq_read, 1218 .read = seq_read,
1180 .llseek = seq_lseek, 1219 .llseek = seq_lseek,
1181 .release = lec_seq_release, 1220 .release = lec_seq_release,
1182}; 1221};
1183#endif 1222#endif
1184 1223
@@ -1186,38 +1225,38 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1186{ 1225{
1187 struct atm_vcc *vcc = ATM_SD(sock); 1226 struct atm_vcc *vcc = ATM_SD(sock);
1188 int err = 0; 1227 int err = 0;
1189 1228
1190 switch (cmd) { 1229 switch (cmd) {
1191 case ATMLEC_CTRL: 1230 case ATMLEC_CTRL:
1192 case ATMLEC_MCAST: 1231 case ATMLEC_MCAST:
1193 case ATMLEC_DATA: 1232 case ATMLEC_DATA:
1194 if (!capable(CAP_NET_ADMIN)) 1233 if (!capable(CAP_NET_ADMIN))
1195 return -EPERM; 1234 return -EPERM;
1196 break; 1235 break;
1197 default: 1236 default:
1198 return -ENOIOCTLCMD; 1237 return -ENOIOCTLCMD;
1199 } 1238 }
1200 1239
1201 switch (cmd) { 1240 switch (cmd) {
1202 case ATMLEC_CTRL: 1241 case ATMLEC_CTRL:
1203 err = lecd_attach(vcc, (int) arg); 1242 err = lecd_attach(vcc, (int)arg);
1204 if (err >= 0) 1243 if (err >= 0)
1205 sock->state = SS_CONNECTED; 1244 sock->state = SS_CONNECTED;
1206 break; 1245 break;
1207 case ATMLEC_MCAST: 1246 case ATMLEC_MCAST:
1208 err = lec_mcast_attach(vcc, (int) arg); 1247 err = lec_mcast_attach(vcc, (int)arg);
1209 break; 1248 break;
1210 case ATMLEC_DATA: 1249 case ATMLEC_DATA:
1211 err = lec_vcc_attach(vcc, (void __user *) arg); 1250 err = lec_vcc_attach(vcc, (void __user *)arg);
1212 break; 1251 break;
1213 } 1252 }
1214 1253
1215 return err; 1254 return err;
1216} 1255}
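lane_ioctl() above is the hook a user-space LANE daemon talks to. Roughly how such a daemon might issue ATMLEC_CTRL to become the control point for a lec interface; the socket family, headers and error handling below are an approximation from memory, not the real zeppelin code:

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/atmlec.h>
#include <stdio.h>

static int attach_lecd(int itf)
{
        int fd, ret;

        fd = socket(PF_ATMSVC, SOCK_DGRAM, 0);  /* ATM signalling socket (assumed) */
        if (fd < 0)
                return -1;
        ret = ioctl(fd, ATMLEC_CTRL, itf);      /* returns the lecX interface number */
        if (ret < 0)
                perror("ATMLEC_CTRL");
        return fd;      /* keep the fd open: lec_atm_close() runs when it is closed */
}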
1217 1256
1218static struct atm_ioctl lane_ioctl_ops = { 1257static struct atm_ioctl lane_ioctl_ops = {
1219 .owner = THIS_MODULE, 1258 .owner = THIS_MODULE,
1220 .ioctl = lane_ioctl, 1259 .ioctl = lane_ioctl,
1221}; 1260};
1222 1261
1223static int __init lane_module_init(void) 1262static int __init lane_module_init(void)
@@ -1231,29 +1270,29 @@ static int __init lane_module_init(void)
1231#endif 1270#endif
1232 1271
1233 register_atm_ioctl(&lane_ioctl_ops); 1272 register_atm_ioctl(&lane_ioctl_ops);
1234 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1273 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n");
1235 return 0; 1274 return 0;
1236} 1275}
1237 1276
1238static void __exit lane_module_cleanup(void) 1277static void __exit lane_module_cleanup(void)
1239{ 1278{
1240 int i; 1279 int i;
1241 struct lec_priv *priv; 1280 struct lec_priv *priv;
1242 1281
1243 remove_proc_entry("lec", atm_proc_root); 1282 remove_proc_entry("lec", atm_proc_root);
1244 1283
1245 deregister_atm_ioctl(&lane_ioctl_ops); 1284 deregister_atm_ioctl(&lane_ioctl_ops);
1246 1285
1247 for (i = 0; i < MAX_LEC_ITF; i++) { 1286 for (i = 0; i < MAX_LEC_ITF; i++) {
1248 if (dev_lec[i] != NULL) { 1287 if (dev_lec[i] != NULL) {
1249 priv = (struct lec_priv *)dev_lec[i]->priv; 1288 priv = (struct lec_priv *)dev_lec[i]->priv;
1250 unregister_netdev(dev_lec[i]); 1289 unregister_netdev(dev_lec[i]);
1251 free_netdev(dev_lec[i]); 1290 free_netdev(dev_lec[i]);
1252 dev_lec[i] = NULL; 1291 dev_lec[i] = NULL;
1253 } 1292 }
1254 } 1293 }
1255 1294
1256 return; 1295 return;
1257} 1296}
1258 1297
1259module_init(lane_module_init); 1298module_init(lane_module_init);
@@ -1267,34 +1306,34 @@ module_exit(lane_module_cleanup);
1267 * If dst_mac == NULL, targetless LE_ARP will be sent 1306 * If dst_mac == NULL, targetless LE_ARP will be sent
1268 */ 1307 */
1269static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, 1308static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
1270 u8 **tlvs, u32 *sizeoftlvs) 1309 u8 **tlvs, u32 *sizeoftlvs)
1271{ 1310{
1272 unsigned long flags; 1311 unsigned long flags;
1273 struct lec_priv *priv = (struct lec_priv *)dev->priv; 1312 struct lec_priv *priv = (struct lec_priv *)dev->priv;
1274 struct lec_arp_table *table; 1313 struct lec_arp_table *table;
1275 struct sk_buff *skb; 1314 struct sk_buff *skb;
1276 int retval; 1315 int retval;
1277 1316
1278 if (force == 0) { 1317 if (force == 0) {
1279 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1318 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1280 table = lec_arp_find(priv, dst_mac); 1319 table = lec_arp_find(priv, dst_mac);
1281 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 1320 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1282 if(table == NULL) 1321 if (table == NULL)
1283 return -1; 1322 return -1;
1284 1323
1285 *tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC); 1324 *tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC);
1286 if (*tlvs == NULL) 1325 if (*tlvs == NULL)
1287 return -1; 1326 return -1;
1288 1327
1289 memcpy(*tlvs, table->tlvs, table->sizeoftlvs); 1328 memcpy(*tlvs, table->tlvs, table->sizeoftlvs);
1290 *sizeoftlvs = table->sizeoftlvs; 1329 *sizeoftlvs = table->sizeoftlvs;
1291 1330
1292 return 0; 1331 return 0;
1293 } 1332 }
1294 1333
1295 if (sizeoftlvs == NULL) 1334 if (sizeoftlvs == NULL)
1296 retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL); 1335 retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL);
1297 1336
1298 else { 1337 else {
1299 skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC); 1338 skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
1300 if (skb == NULL) 1339 if (skb == NULL)
@@ -1303,9 +1342,8 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
1303 memcpy(skb->data, *tlvs, *sizeoftlvs); 1342 memcpy(skb->data, *tlvs, *sizeoftlvs);
1304 retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb); 1343 retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
1305 } 1344 }
1306 return retval; 1345 return retval;
1307} 1346}
1308
1309 1347
1310/* 1348/*
1311 * LANE2: 3.1.4, LE_ASSOCIATE.request 1349 * LANE2: 3.1.4, LE_ASSOCIATE.request
@@ -1314,80 +1352,85 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
1314 * Returns 1 for success, 0 for failure (out of memory) 1352 * Returns 1 for success, 0 for failure (out of memory)
1315 * 1353 *
1316 */ 1354 */
1317static int lane2_associate_req (struct net_device *dev, u8 *lan_dst, 1355static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
1318 u8 *tlvs, u32 sizeoftlvs) 1356 u8 *tlvs, u32 sizeoftlvs)
1319{ 1357{
1320 int retval; 1358 int retval;
1321 struct sk_buff *skb; 1359 struct sk_buff *skb;
1322 struct lec_priv *priv = (struct lec_priv*)dev->priv; 1360 struct lec_priv *priv = (struct lec_priv *)dev->priv;
1323 1361
1324 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1362 if (compare_ether_addr(lan_dst, dev->dev_addr))
1325 return (0); /* not our mac address */ 1363 return (0); /* not our mac address */
1326 1364
1327 kfree(priv->tlvs); /* NULL if there was no previous association */ 1365 kfree(priv->tlvs); /* NULL if there was no previous association */
1328 1366
1329 priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL); 1367 priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
1330 if (priv->tlvs == NULL) 1368 if (priv->tlvs == NULL)
1331 return (0); 1369 return (0);
1332 priv->sizeoftlvs = sizeoftlvs; 1370 priv->sizeoftlvs = sizeoftlvs;
1333 memcpy(priv->tlvs, tlvs, sizeoftlvs); 1371 memcpy(priv->tlvs, tlvs, sizeoftlvs);
1334 1372
1335 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); 1373 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
1336 if (skb == NULL) 1374 if (skb == NULL)
1337 return 0; 1375 return 0;
1338 skb->len = sizeoftlvs; 1376 skb->len = sizeoftlvs;
1339 memcpy(skb->data, tlvs, sizeoftlvs); 1377 memcpy(skb->data, tlvs, sizeoftlvs);
1340 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); 1378 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
1341 if (retval != 0) 1379 if (retval != 0)
1342 printk("lec.c: lane2_associate_req() failed\n"); 1380 printk("lec.c: lane2_associate_req() failed\n");
1343 /* If the previous association has changed we must 1381 /*
1344 * somehow notify other LANE entities about the change 1382 * If the previous association has changed we must
1345 */ 1383 * somehow notify other LANE entities about the change
1346 return (1); 1384 */
1385 return (1);
1347} 1386}
1348 1387
1349/* 1388/*
1350 * LANE2: 3.1.5, LE_ASSOCIATE.indication 1389 * LANE2: 3.1.5, LE_ASSOCIATE.indication
1351 * 1390 *
1352 */ 1391 */
1353static void lane2_associate_ind (struct net_device *dev, u8 *mac_addr, 1392static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
1354 u8 *tlvs, u32 sizeoftlvs) 1393 u8 *tlvs, u32 sizeoftlvs)
1355{ 1394{
1356#if 0 1395#if 0
1357 int i = 0; 1396 int i = 0;
1358#endif 1397#endif
1359 struct lec_priv *priv = (struct lec_priv *)dev->priv; 1398 struct lec_priv *priv = (struct lec_priv *)dev->priv;
1360#if 0 /* Why have the TLVs in LE_ARP entries since we do not use them? When you 1399#if 0 /*
1361 uncomment this code, make sure the TLVs get freed when entry is killed */ 1400 * Why have the TLVs in LE_ARP entries
1362 struct lec_arp_table *entry = lec_arp_find(priv, mac_addr); 1401 * since we do not use them? When you
1402 * uncomment this code, make sure the
1403 * TLVs get freed when entry is killed
1404 */
1405 struct lec_arp_table *entry = lec_arp_find(priv, mac_addr);
1363 1406
1364 if (entry == NULL) 1407 if (entry == NULL)
1365 return; /* should not happen */ 1408 return; /* should not happen */
1366 1409
1367 kfree(entry->tlvs); 1410 kfree(entry->tlvs);
1368 1411
1369 entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL); 1412 entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
1370 if (entry->tlvs == NULL) 1413 if (entry->tlvs == NULL)
1371 return; 1414 return;
1372 1415
1373 entry->sizeoftlvs = sizeoftlvs; 1416 entry->sizeoftlvs = sizeoftlvs;
1374 memcpy(entry->tlvs, tlvs, sizeoftlvs); 1417 memcpy(entry->tlvs, tlvs, sizeoftlvs);
1375#endif 1418#endif
1376#if 0 1419#if 0
1377 printk("lec.c: lane2_associate_ind()\n"); 1420 printk("lec.c: lane2_associate_ind()\n");
1378 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); 1421 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
1379 while (i < sizeoftlvs) 1422 while (i < sizeoftlvs)
1380 printk("%02x ", tlvs[i++]); 1423 printk("%02x ", tlvs[i++]);
1381 1424
1382 printk("\n"); 1425 printk("\n");
1383#endif 1426#endif
1384 1427
1385 /* tell MPOA about the TLVs we saw */ 1428 /* tell MPOA about the TLVs we saw */
1386 if (priv->lane2_ops && priv->lane2_ops->associate_indicator) { 1429 if (priv->lane2_ops && priv->lane2_ops->associate_indicator) {
1387 priv->lane2_ops->associate_indicator(dev, mac_addr, 1430 priv->lane2_ops->associate_indicator(dev, mac_addr,
1388 tlvs, sizeoftlvs); 1431 tlvs, sizeoftlvs);
1389 } 1432 }
1390 return; 1433 return;
1391} 1434}
1392 1435
1393/* 1436/*
@@ -1395,7 +1438,6 @@ static void lane2_associate_ind (struct net_device *dev, u8 *mac_addr,
1395 * 1438 *
1396 * lec_arpc.c was added here when making 1439 * lec_arpc.c was added here when making
1397 * lane client modular. October 1997 1440 * lane client modular. October 1997
1398 *
1399 */ 1441 */
1400 1442
1401#include <linux/types.h> 1443#include <linux/types.h>
@@ -1406,7 +1448,6 @@ static void lane2_associate_ind (struct net_device *dev, u8 *mac_addr,
1406#include <linux/inetdevice.h> 1448#include <linux/inetdevice.h>
1407#include <net/route.h> 1449#include <net/route.h>
1408 1450
1409
1410#if 0 1451#if 0
1411#define DPRINTK(format,args...) 1452#define DPRINTK(format,args...)
1412/* 1453/*
@@ -1417,7 +1458,7 @@ static void lane2_associate_ind (struct net_device *dev, u8 *mac_addr,
1417 1458
1418#define LEC_ARP_REFRESH_INTERVAL (3*HZ) 1459#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
1419 1460
1420static void lec_arp_check_expire(unsigned long data); 1461static void lec_arp_check_expire(void *data);
1421static void lec_arp_expire_arp(unsigned long data); 1462static void lec_arp_expire_arp(unsigned long data);
1422 1463
1423/* 1464/*
@@ -1429,474 +1470,397 @@ static void lec_arp_expire_arp(unsigned long data);
1429/* 1470/*
1430 * Initialization of arp-cache 1471 * Initialization of arp-cache
1431 */ 1472 */
1432static void 1473static void lec_arp_init(struct lec_priv *priv)
1433lec_arp_init(struct lec_priv *priv)
1434{ 1474{
1435 unsigned short i; 1475 unsigned short i;
1436 1476
1437 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1477 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1438 priv->lec_arp_tables[i] = NULL; 1478 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1439 } 1479 }
1480 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1481 INIT_HLIST_HEAD(&priv->lec_no_forward);
1482 INIT_HLIST_HEAD(&priv->mcast_fwds);
1440 spin_lock_init(&priv->lec_arp_lock); 1483 spin_lock_init(&priv->lec_arp_lock);
1441 init_timer(&priv->lec_arp_timer); 1484 INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
1442 priv->lec_arp_timer.expires = jiffies + LEC_ARP_REFRESH_INTERVAL; 1485 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
1443 priv->lec_arp_timer.data = (unsigned long)priv;
1444 priv->lec_arp_timer.function = lec_arp_check_expire;
1445 add_timer(&priv->lec_arp_timer);
1446} 1486}
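The hunk above replaces the self-rearming lec_arp_timer with a work item. A minimal sketch of that timer-to-workqueue conversion under the pre-2.6.20 API visible here (three-argument INIT_WORK, work function taking void *, delayed scheduling on a plain work_struct); the names are illustrative:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_REFRESH_INTERVAL (3 * HZ)

struct my_cache {
        struct work_struct expire_work;
};

static void my_check_expire(void *data)
{
        struct my_cache *c = data;

        /* ... walk the cache, expire stale entries ... */

        /* re-arm: behaves like a self-rescheduling timer, but runs in
         * process context and so may sleep */
        schedule_delayed_work(&c->expire_work, MY_REFRESH_INTERVAL);
}

static void my_cache_init(struct my_cache *c)
{
        INIT_WORK(&c->expire_work, my_check_expire, c);
        schedule_delayed_work(&c->expire_work, MY_REFRESH_INTERVAL);
}

static void my_cache_destroy(struct my_cache *c)
{
        cancel_rearming_delayed_work(&c->expire_work);  /* waits out a re-arm race */
}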
1447 1487
1448static void 1488static void lec_arp_clear_vccs(struct lec_arp_table *entry)
1449lec_arp_clear_vccs(struct lec_arp_table *entry)
1450{ 1489{
1451 if (entry->vcc) { 1490 if (entry->vcc) {
1452 struct atm_vcc *vcc = entry->vcc; 1491 struct atm_vcc *vcc = entry->vcc;
1453 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); 1492 struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
1454 struct net_device *dev = (struct net_device*) vcc->proto_data; 1493 struct net_device *dev = (struct net_device *)vcc->proto_data;
1455 1494
1456 vcc->pop = vpriv->old_pop; 1495 vcc->pop = vpriv->old_pop;
1457 if (vpriv->xoff) 1496 if (vpriv->xoff)
1458 netif_wake_queue(dev); 1497 netif_wake_queue(dev);
1459 kfree(vpriv); 1498 kfree(vpriv);
1460 vcc->user_back = NULL; 1499 vcc->user_back = NULL;
1461 vcc->push = entry->old_push; 1500 vcc->push = entry->old_push;
1462 vcc_release_async(vcc, -EPIPE); 1501 vcc_release_async(vcc, -EPIPE);
1463 vcc = NULL; 1502 entry->vcc = NULL;
1464 } 1503 }
1465 if (entry->recv_vcc) { 1504 if (entry->recv_vcc) {
1466 entry->recv_vcc->push = entry->old_recv_push; 1505 entry->recv_vcc->push = entry->old_recv_push;
1467 vcc_release_async(entry->recv_vcc, -EPIPE); 1506 vcc_release_async(entry->recv_vcc, -EPIPE);
1468 entry->recv_vcc = NULL; 1507 entry->recv_vcc = NULL;
1469 } 1508 }
1470} 1509}
1471 1510
1472/* 1511/*
1473 * Insert entry to lec_arp_table 1512 * Insert entry to lec_arp_table
1474 * LANE2: Add to the end of the list to satisfy 8.1.13 1513 * LANE2: Add to the end of the list to satisfy 8.1.13
1475 */ 1514 */
1476static inline void 1515static inline void
1477lec_arp_add(struct lec_priv *priv, struct lec_arp_table *to_add) 1516lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1478{ 1517{
1479 unsigned short place; 1518 struct hlist_head *tmp;
1480 struct lec_arp_table *tmp; 1519
1481 1520 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
1482 place = HASH(to_add->mac_addr[ETH_ALEN-1]); 1521 hlist_add_head(&entry->next, tmp);
1483 tmp = priv->lec_arp_tables[place]; 1522
1484 to_add->next = NULL; 1523 DPRINTK("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
1485 if (tmp == NULL) 1524 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
1486 priv->lec_arp_tables[place] = to_add; 1525 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
1487 1526 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
1488 else { /* add to the end */
1489 while (tmp->next)
1490 tmp = tmp->next;
1491 tmp->next = to_add;
1492 }
1493
1494 DPRINTK("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
1495 0xff&to_add->mac_addr[0], 0xff&to_add->mac_addr[1],
1496 0xff&to_add->mac_addr[2], 0xff&to_add->mac_addr[3],
1497 0xff&to_add->mac_addr[4], 0xff&to_add->mac_addr[5]);
1498} 1527}
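The open-coded singly linked buckets are gone; from here on the code uses hlist throughout. A minimal sketch of the bucket idiom it relies on (old four-argument hlist_for_each_entry(); the struct, hash and table size below are illustrative):

#include <linux/list.h>
#include <linux/string.h>

#define MY_TABLE_SIZE 16
#define MY_HASH(b) ((b) & (MY_TABLE_SIZE - 1))

struct my_entry {
        struct hlist_node next;         /* links the entry into one bucket */
        unsigned char mac_addr[6];
};

static struct hlist_head my_table[MY_TABLE_SIZE];

static void my_add(struct my_entry *e)
{
        hlist_add_head(&e->next, &my_table[MY_HASH(e->mac_addr[5])]);
}

static struct my_entry *my_find(const unsigned char *mac)
{
        struct hlist_node *node;
        struct my_entry *e;

        hlist_for_each_entry(e, node, &my_table[MY_HASH(mac[5])], next) {
                if (!memcmp(mac, e->mac_addr, 6))
                        return e;
        }
        return NULL;
}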
1499 1528
1500/* 1529/*
1501 * Remove entry from lec_arp_table 1530 * Remove entry from lec_arp_table
1502 */ 1531 */
1503static int 1532static int
1504lec_arp_remove(struct lec_priv *priv, 1533lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1505 struct lec_arp_table *to_remove)
1506{ 1534{
1507 unsigned short place; 1535 struct hlist_node *node;
1508 struct lec_arp_table *tmp; 1536 struct lec_arp_table *entry;
1509 int remove_vcc=1; 1537 int i, remove_vcc = 1;
1510 1538
1511 if (!to_remove) { 1539 if (!to_remove) {
1512 return -1; 1540 return -1;
1513 } 1541 }
1514 place = HASH(to_remove->mac_addr[ETH_ALEN-1]); 1542
1515 tmp = priv->lec_arp_tables[place]; 1543 hlist_del(&to_remove->next);
1516 if (tmp == to_remove) { 1544 del_timer(&to_remove->timer);
1517 priv->lec_arp_tables[place] = tmp->next; 1545
1518 } else { 1546 /* If this is the only MAC connected to this VCC, also tear down the VCC */
1519 while(tmp && tmp->next != to_remove) { 1547 if (to_remove->status >= ESI_FLUSH_PENDING) {
1520 tmp = tmp->next; 1548 /*
1521 } 1549 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1522 if (!tmp) {/* Entry was not found */ 1550 */
1523 return -1; 1551 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1524 } 1552 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
1525 } 1553 if (memcmp(to_remove->atm_addr,
1526 tmp->next = to_remove->next; 1554 entry->atm_addr, ATM_ESA_LEN) == 0) {
1527 del_timer(&to_remove->timer); 1555 remove_vcc = 0;
1528 1556 break;
1529 /* If this is the only MAC connected to this VCC, also tear down 1557 }
1530 the VCC */ 1558 }
1531 if (to_remove->status >= ESI_FLUSH_PENDING) { 1559 }
1532 /* 1560 if (remove_vcc)
1533 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1561 lec_arp_clear_vccs(to_remove);
1534 */ 1562 }
1535 for(place = 0; place < LEC_ARP_TABLE_SIZE; place++) { 1563 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1536 for(tmp = priv->lec_arp_tables[place]; tmp != NULL; tmp = tmp->next) { 1564
1537 if (memcmp(tmp->atm_addr, to_remove->atm_addr, 1565 DPRINTK("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
1538 ATM_ESA_LEN)==0) { 1566 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
1539 remove_vcc=0; 1567 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
1540 break; 1568 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
1541 } 1569 return 0;
1542 }
1543 }
1544 if (remove_vcc)
1545 lec_arp_clear_vccs(to_remove);
1546 }
1547 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1548
1549 DPRINTK("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
1550 0xff&to_remove->mac_addr[0], 0xff&to_remove->mac_addr[1],
1551 0xff&to_remove->mac_addr[2], 0xff&to_remove->mac_addr[3],
1552 0xff&to_remove->mac_addr[4], 0xff&to_remove->mac_addr[5]);
1553 return 0;
1554} 1570}
1555 1571
1556#if DEBUG_ARP_TABLE 1572#if DEBUG_ARP_TABLE
1557static char* 1573static char *get_status_string(unsigned char st)
1558get_status_string(unsigned char st)
1559{ 1574{
1560 switch(st) { 1575 switch (st) {
1561 case ESI_UNKNOWN: 1576 case ESI_UNKNOWN:
1562 return "ESI_UNKNOWN"; 1577 return "ESI_UNKNOWN";
1563 case ESI_ARP_PENDING: 1578 case ESI_ARP_PENDING:
1564 return "ESI_ARP_PENDING"; 1579 return "ESI_ARP_PENDING";
1565 case ESI_VC_PENDING: 1580 case ESI_VC_PENDING:
1566 return "ESI_VC_PENDING"; 1581 return "ESI_VC_PENDING";
1567 case ESI_FLUSH_PENDING: 1582 case ESI_FLUSH_PENDING:
1568 return "ESI_FLUSH_PENDING"; 1583 return "ESI_FLUSH_PENDING";
1569 case ESI_FORWARD_DIRECT: 1584 case ESI_FORWARD_DIRECT:
1570 return "ESI_FORWARD_DIRECT"; 1585 return "ESI_FORWARD_DIRECT";
1571 default: 1586 default:
1572 return "<UNKNOWN>"; 1587 return "<UNKNOWN>";
1573 } 1588 }
1574} 1589}
1575#endif
1576 1590
1577static void 1591static void dump_arp_table(struct lec_priv *priv)
1578dump_arp_table(struct lec_priv *priv)
1579{ 1592{
1580#if DEBUG_ARP_TABLE 1593 struct hlist_node *node;
1581 int i,j, offset; 1594 struct lec_arp_table *rulla;
1582 struct lec_arp_table *rulla; 1595 char buf[256];
1583 char buf[1024]; 1596 int i, j, offset;
1584 struct lec_arp_table **lec_arp_tables = 1597
1585 (struct lec_arp_table **)priv->lec_arp_tables; 1598 printk("Dump %p:\n", priv);
1586 struct lec_arp_table *lec_arp_empty_ones = 1599 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1587 (struct lec_arp_table *)priv->lec_arp_empty_ones; 1600 hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) {
1588 struct lec_arp_table *lec_no_forward = 1601 offset = 0;
1589 (struct lec_arp_table *)priv->lec_no_forward; 1602 offset += sprintf(buf, "%d: %p\n", i, rulla);
1590 struct lec_arp_table *mcast_fwds = priv->mcast_fwds; 1603 offset += sprintf(buf + offset, "Mac:");
1591 1604 for (j = 0; j < ETH_ALEN; j++) {
1592 1605 offset += sprintf(buf + offset,
1593 printk("Dump %p:\n",priv); 1606 "%2.2x ",
1594 for (i=0;i<LEC_ARP_TABLE_SIZE;i++) { 1607 rulla->mac_addr[j] & 0xff);
1595 rulla = lec_arp_tables[i]; 1608 }
1596 offset = 0; 1609 offset += sprintf(buf + offset, "Atm:");
1597 offset += sprintf(buf,"%d: %p\n",i, rulla); 1610 for (j = 0; j < ATM_ESA_LEN; j++) {
1598 while (rulla) { 1611 offset += sprintf(buf + offset,
1599 offset += sprintf(buf+offset,"Mac:"); 1612 "%2.2x ",
1600 for(j=0;j<ETH_ALEN;j++) { 1613 rulla->atm_addr[j] & 0xff);
1601 offset+=sprintf(buf+offset, 1614 }
1602 "%2.2x ", 1615 offset += sprintf(buf + offset,
1603 rulla->mac_addr[j]&0xff); 1616 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
1604 } 1617 rulla->vcc ? rulla->vcc->vpi : 0,
1605 offset +=sprintf(buf+offset,"Atm:"); 1618 rulla->vcc ? rulla->vcc->vci : 0,
1606 for(j=0;j<ATM_ESA_LEN;j++) { 1619 rulla->recv_vcc ? rulla->recv_vcc->
1607 offset+=sprintf(buf+offset, 1620 vpi : 0,
1608 "%2.2x ", 1621 rulla->recv_vcc ? rulla->recv_vcc->
1609 rulla->atm_addr[j]&0xff); 1622 vci : 0, rulla->last_used,
1610 } 1623 rulla->timestamp, rulla->no_tries);
1611 offset+=sprintf(buf+offset, 1624 offset +=
1612 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", 1625 sprintf(buf + offset,
1613 rulla->vcc?rulla->vcc->vpi:0, 1626 "Flags:%x, Packets_flooded:%x, Status: %s ",
1614 rulla->vcc?rulla->vcc->vci:0, 1627 rulla->flags, rulla->packets_flooded,
1615 rulla->recv_vcc?rulla->recv_vcc->vpi:0, 1628 get_status_string(rulla->status));
1616 rulla->recv_vcc?rulla->recv_vcc->vci:0, 1629 printk("%s\n", buf);
1617 rulla->last_used, 1630 }
1618 rulla->timestamp, rulla->no_tries); 1631 }
1619 offset+=sprintf(buf+offset, 1632
1620 "Flags:%x, Packets_flooded:%x, Status: %s ", 1633 if (!hlist_empty(&priv->lec_no_forward))
1621 rulla->flags, rulla->packets_flooded, 1634 printk("No forward\n");
1622 get_status_string(rulla->status)); 1635 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
1623 offset+=sprintf(buf+offset,"->%p\n",rulla->next); 1636 offset = 0;
1624 rulla = rulla->next; 1637 offset += sprintf(buf + offset, "Mac:");
1625 } 1638 for (j = 0; j < ETH_ALEN; j++) {
1626 printk("%s",buf); 1639 offset += sprintf(buf + offset, "%2.2x ",
1627 } 1640 rulla->mac_addr[j] & 0xff);
1628 rulla = lec_no_forward; 1641 }
1629 if (rulla) 1642 offset += sprintf(buf + offset, "Atm:");
1630 printk("No forward\n"); 1643 for (j = 0; j < ATM_ESA_LEN; j++) {
1631 while(rulla) { 1644 offset += sprintf(buf + offset, "%2.2x ",
1632 offset=0; 1645 rulla->atm_addr[j] & 0xff);
1633 offset += sprintf(buf+offset,"Mac:"); 1646 }
1634 for(j=0;j<ETH_ALEN;j++) { 1647 offset += sprintf(buf + offset,
1635 offset+=sprintf(buf+offset,"%2.2x ", 1648 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
1636 rulla->mac_addr[j]&0xff); 1649 rulla->vcc ? rulla->vcc->vpi : 0,
1637 } 1650 rulla->vcc ? rulla->vcc->vci : 0,
1638 offset +=sprintf(buf+offset,"Atm:"); 1651 rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
1639 for(j=0;j<ATM_ESA_LEN;j++) { 1652 rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
1640 offset+=sprintf(buf+offset,"%2.2x ", 1653 rulla->last_used,
1641 rulla->atm_addr[j]&0xff); 1654 rulla->timestamp, rulla->no_tries);
1642 } 1655 offset += sprintf(buf + offset,
1643 offset+=sprintf(buf+offset, 1656 "Flags:%x, Packets_flooded:%x, Status: %s ",
1644 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", 1657 rulla->flags, rulla->packets_flooded,
1645 rulla->vcc?rulla->vcc->vpi:0, 1658 get_status_string(rulla->status));
1646 rulla->vcc?rulla->vcc->vci:0, 1659 printk("%s\n", buf);
1647 rulla->recv_vcc?rulla->recv_vcc->vpi:0, 1660 }
1648 rulla->recv_vcc?rulla->recv_vcc->vci:0, 1661
1649 rulla->last_used, 1662 if (!hlist_empty(&priv->lec_arp_empty_ones))
1650 rulla->timestamp, rulla->no_tries); 1663 printk("Empty ones\n");
1651 offset+=sprintf(buf+offset, 1664 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
1652 "Flags:%x, Packets_flooded:%x, Status: %s ", 1665 offset = 0;
1653 rulla->flags, rulla->packets_flooded, 1666 offset += sprintf(buf + offset, "Mac:");
1654 get_status_string(rulla->status)); 1667 for (j = 0; j < ETH_ALEN; j++) {
1655 offset+=sprintf(buf+offset,"->%lx\n",(long)rulla->next); 1668 offset += sprintf(buf + offset, "%2.2x ",
1656 rulla = rulla->next; 1669 rulla->mac_addr[j] & 0xff);
1657 printk("%s",buf); 1670 }
1658 } 1671 offset += sprintf(buf + offset, "Atm:");
1659 rulla = lec_arp_empty_ones; 1672 for (j = 0; j < ATM_ESA_LEN; j++) {
1660 if (rulla) 1673 offset += sprintf(buf + offset, "%2.2x ",
1661 printk("Empty ones\n"); 1674 rulla->atm_addr[j] & 0xff);
1662 while(rulla) { 1675 }
1663 offset=0; 1676 offset += sprintf(buf + offset,
1664 offset += sprintf(buf+offset,"Mac:"); 1677 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
1665 for(j=0;j<ETH_ALEN;j++) { 1678 rulla->vcc ? rulla->vcc->vpi : 0,
1666 offset+=sprintf(buf+offset,"%2.2x ", 1679 rulla->vcc ? rulla->vcc->vci : 0,
1667 rulla->mac_addr[j]&0xff); 1680 rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
1668 } 1681 rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
1669 offset +=sprintf(buf+offset,"Atm:"); 1682 rulla->last_used,
1670 for(j=0;j<ATM_ESA_LEN;j++) { 1683 rulla->timestamp, rulla->no_tries);
1671 offset+=sprintf(buf+offset,"%2.2x ", 1684 offset += sprintf(buf + offset,
1672 rulla->atm_addr[j]&0xff); 1685 "Flags:%x, Packets_flooded:%x, Status: %s ",
1673 } 1686 rulla->flags, rulla->packets_flooded,
1674 offset+=sprintf(buf+offset, 1687 get_status_string(rulla->status));
1675 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", 1688 printk("%s", buf);
1676 rulla->vcc?rulla->vcc->vpi:0, 1689 }
1677 rulla->vcc?rulla->vcc->vci:0, 1690
1678 rulla->recv_vcc?rulla->recv_vcc->vpi:0, 1691 if (!hlist_empty(&priv->mcast_fwds))
1679 rulla->recv_vcc?rulla->recv_vcc->vci:0, 1692 printk("Multicast Forward VCCs\n");
1680 rulla->last_used, 1693 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
1681 rulla->timestamp, rulla->no_tries); 1694 offset = 0;
1682 offset+=sprintf(buf+offset, 1695 offset += sprintf(buf + offset, "Mac:");
1683 "Flags:%x, Packets_flooded:%x, Status: %s ", 1696 for (j = 0; j < ETH_ALEN; j++) {
1684 rulla->flags, rulla->packets_flooded, 1697 offset += sprintf(buf + offset, "%2.2x ",
1685 get_status_string(rulla->status)); 1698 rulla->mac_addr[j] & 0xff);
1686 offset+=sprintf(buf+offset,"->%lx\n",(long)rulla->next); 1699 }
1687 rulla = rulla->next; 1700 offset += sprintf(buf + offset, "Atm:");
1688 printk("%s",buf); 1701 for (j = 0; j < ATM_ESA_LEN; j++) {
1689 } 1702 offset += sprintf(buf + offset, "%2.2x ",
1690 1703 rulla->atm_addr[j] & 0xff);
1691 rulla = mcast_fwds; 1704 }
1692 if (rulla) 1705 offset += sprintf(buf + offset,
1693 printk("Multicast Forward VCCs\n"); 1706 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
1694 while(rulla) { 1707 rulla->vcc ? rulla->vcc->vpi : 0,
1695 offset=0; 1708 rulla->vcc ? rulla->vcc->vci : 0,
1696 offset += sprintf(buf+offset,"Mac:"); 1709 rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
1697 for(j=0;j<ETH_ALEN;j++) { 1710 rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
1698 offset+=sprintf(buf+offset,"%2.2x ", 1711 rulla->last_used,
1699 rulla->mac_addr[j]&0xff); 1712 rulla->timestamp, rulla->no_tries);
1700 } 1713 offset += sprintf(buf + offset,
1701 offset +=sprintf(buf+offset,"Atm:"); 1714 "Flags:%x, Packets_flooded:%x, Status: %s ",
1702 for(j=0;j<ATM_ESA_LEN;j++) { 1715 rulla->flags, rulla->packets_flooded,
1703 offset+=sprintf(buf+offset,"%2.2x ", 1716 get_status_string(rulla->status));
1704 rulla->atm_addr[j]&0xff); 1717 printk("%s\n", buf);
1705 } 1718 }
1706 offset+=sprintf(buf+offset,
1707 "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
1708 rulla->vcc?rulla->vcc->vpi:0,
1709 rulla->vcc?rulla->vcc->vci:0,
1710 rulla->recv_vcc?rulla->recv_vcc->vpi:0,
1711 rulla->recv_vcc?rulla->recv_vcc->vci:0,
1712 rulla->last_used,
1713 rulla->timestamp, rulla->no_tries);
1714 offset+=sprintf(buf+offset,
1715 "Flags:%x, Packets_flooded:%x, Status: %s ",
1716 rulla->flags, rulla->packets_flooded,
1717 get_status_string(rulla->status));
1718 offset+=sprintf(buf+offset,"->%lx\n",(long)rulla->next);
1719 rulla = rulla->next;
1720 printk("%s",buf);
1721 }
1722 1719
1723#endif
1724} 1720}
1721#else
1722#define dump_arp_table(priv) do { } while (0)
1723#endif
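When DEBUG_ARP_TABLE is off, dump_arp_table() now collapses to a do { } while (0) stub. That form expands to exactly one statement and still requires the trailing semicolon, so the call site keeps reading and parsing like an ordinary function call; a tiny illustrative example:

/* Illustrative only: the stub stays a single statement at every call site. */
#define dump_arp_table(priv) do { } while (0)

static void example(void *priv, int debug)
{
        if (debug)
                dump_arp_table(priv);   /* one statement, semicolon required */
        else
                dump_arp_table(priv);
}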
1725 1724
1726/* 1725/*
1727 * Destruction of arp-cache 1726 * Destruction of arp-cache
1728 */ 1727 */
1729static void 1728static void lec_arp_destroy(struct lec_priv *priv)
1730lec_arp_destroy(struct lec_priv *priv)
1731{ 1729{
1732 unsigned long flags; 1730 unsigned long flags;
1733 struct lec_arp_table *entry, *next; 1731 struct hlist_node *node, *next;
1734 int i; 1732 struct lec_arp_table *entry;
1733 int i;
1734
1735 cancel_rearming_delayed_work(&priv->lec_arp_work);
1735 1736
1736 del_timer_sync(&priv->lec_arp_timer); 1737 /*
1737 1738 * Remove all entries
1738 /* 1739 */
1739 * Remove all entries
1740 */
1741 1740
1742 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1741 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1743 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1742 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1744 for(entry = priv->lec_arp_tables[i]; entry != NULL; entry=next) { 1743 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
1745 next = entry->next; 1744 lec_arp_remove(priv, entry);
1746 lec_arp_remove(priv, entry); 1745 lec_arp_put(entry);
1747 kfree(entry); 1746 }
1748 } 1747 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1749 } 1748 }
1750 entry = priv->lec_arp_empty_ones; 1749
1751 while(entry) { 1750 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
1752 next = entry->next; 1751 del_timer_sync(&entry->timer);
1753 del_timer_sync(&entry->timer); 1752 lec_arp_clear_vccs(entry);
1754 lec_arp_clear_vccs(entry); 1753 hlist_del(&entry->next);
1755 kfree(entry); 1754 lec_arp_put(entry);
1756 entry = next; 1755 }
1757 } 1756 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1758 priv->lec_arp_empty_ones = NULL; 1757
1759 entry = priv->lec_no_forward; 1758 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) {
1760 while(entry) { 1759 del_timer_sync(&entry->timer);
1761 next = entry->next; 1760 lec_arp_clear_vccs(entry);
1762 del_timer_sync(&entry->timer); 1761 hlist_del(&entry->next);
1763 lec_arp_clear_vccs(entry); 1762 lec_arp_put(entry);
1764 kfree(entry); 1763 }
1765 entry = next; 1764 INIT_HLIST_HEAD(&priv->lec_no_forward);
1766 } 1765
1767 priv->lec_no_forward = NULL; 1766 hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
1768 entry = priv->mcast_fwds; 1767 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
1769 while(entry) { 1768 lec_arp_clear_vccs(entry);
1770 next = entry->next; 1769 hlist_del(&entry->next);
1771 /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ 1770 lec_arp_put(entry);
1772 lec_arp_clear_vccs(entry); 1771 }
1773 kfree(entry); 1772 INIT_HLIST_HEAD(&priv->mcast_fwds);
1774 entry = next; 1773 priv->mcast_vcc = NULL;
1775 }
1776 priv->mcast_fwds = NULL;
1777 priv->mcast_vcc = NULL;
1778 memset(priv->lec_arp_tables, 0,
1779 sizeof(struct lec_arp_table *) * LEC_ARP_TABLE_SIZE);
1780 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 1774 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1781} 1775}
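lec_arp_destroy() now tears the chains down with hlist_for_each_entry_safe(), which saves the next node before each iteration so the current entry may be unlinked and freed inside the loop. A minimal sketch of that teardown loop (old five-argument form; illustrative types, plain kfree() instead of the refcount puts used above):

#include <linux/list.h>
#include <linux/slab.h>

struct my_entry {
        struct hlist_node next;
};

static void my_flush(struct hlist_head *head)
{
        struct hlist_node *node, *tmp;
        struct my_entry *e;

        hlist_for_each_entry_safe(e, node, tmp, head, next) {
                hlist_del(&e->next);    /* unlink, then free */
                kfree(e);
        }
        INIT_HLIST_HEAD(head);          /* leave the bucket in a known-empty state */
}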
1782 1776
1783
1784/* 1777/*
1785 * Find entry by mac_address 1778 * Find entry by mac_address
1786 */ 1779 */
1787static struct lec_arp_table* 1780static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1788lec_arp_find(struct lec_priv *priv, 1781 unsigned char *mac_addr)
1789 unsigned char *mac_addr)
1790{ 1782{
1791 unsigned short place; 1783 struct hlist_node *node;
1792 struct lec_arp_table *to_return; 1784 struct hlist_head *head;
1793 1785 struct lec_arp_table *entry;
1794 DPRINTK("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1786
1795 mac_addr[0]&0xff, mac_addr[1]&0xff, mac_addr[2]&0xff, 1787 DPRINTK("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
1796 mac_addr[3]&0xff, mac_addr[4]&0xff, mac_addr[5]&0xff); 1788 mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
1797 place = HASH(mac_addr[ETH_ALEN-1]); 1789 mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
1798 1790
1799 to_return = priv->lec_arp_tables[place]; 1791 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1800 while(to_return) { 1792 hlist_for_each_entry(entry, node, head, next) {
1801 if (!compare_ether_addr(mac_addr, to_return->mac_addr)) { 1793 if (!compare_ether_addr(mac_addr, entry->mac_addr)) {
1802 return to_return; 1794 return entry;
1803 } 1795 }
1804 to_return = to_return->next; 1796 }
1805 } 1797 return NULL;
1806 return NULL;
1807} 1798}
1808 1799
1809static struct lec_arp_table* 1800static struct lec_arp_table *make_entry(struct lec_priv *priv,
1810make_entry(struct lec_priv *priv, unsigned char *mac_addr) 1801 unsigned char *mac_addr)
1811{ 1802{
1812 struct lec_arp_table *to_return; 1803 struct lec_arp_table *to_return;
1813 1804
1814 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1805 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1815 if (!to_return) { 1806 if (!to_return) {
1816 printk("LEC: Arp entry kmalloc failed\n"); 1807 printk("LEC: Arp entry kmalloc failed\n");
1817 return NULL; 1808 return NULL;
1818 } 1809 }
1819 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1810 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
1820 init_timer(&to_return->timer); 1811 INIT_HLIST_NODE(&to_return->next);
1821 to_return->timer.function = lec_arp_expire_arp; 1812 init_timer(&to_return->timer);
1822 to_return->timer.data = (unsigned long) to_return; 1813 to_return->timer.function = lec_arp_expire_arp;
1823 to_return->last_used = jiffies; 1814 to_return->timer.data = (unsigned long)to_return;
1824 to_return->priv = priv; 1815 to_return->last_used = jiffies;
1825 skb_queue_head_init(&to_return->tx_wait); 1816 to_return->priv = priv;
1826 return to_return; 1817 skb_queue_head_init(&to_return->tx_wait);
1818 atomic_set(&to_return->usage, 1);
1819 return to_return;
1827} 1820}
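The new make_entry() initializes the embedded hlist node and sets the entry's usage count to 1, which is what lec_arp_put() releases in the teardown paths above and below. The lec_arp_hold()/lec_arp_put() helpers themselves are introduced by an earlier part of this patch that is not among the hunks shown here; assuming they are thin wrappers around the new atomic_t usage field (see the lec_arpc.h hunk further down), they would look roughly like:

static inline void lec_arp_hold(struct lec_arp_table *entry)
{
	atomic_inc(&entry->usage);
}

static inline void lec_arp_put(struct lec_arp_table *entry)
{
	if (atomic_dec_and_test(&entry->usage))
		kfree(entry);	/* last reference gone: free, as the old code did directly */
}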
1828 1821
-/*
- *
- * Arp sent timer expired
- *
- */
-static void
-lec_arp_expire_arp(unsigned long data)
+/* Arp sent timer expired */
+static void lec_arp_expire_arp(unsigned long data)
1836{ 1824{
1837 struct lec_arp_table *entry; 1825 struct lec_arp_table *entry;
1838 1826
1839 entry = (struct lec_arp_table *)data; 1827 entry = (struct lec_arp_table *)data;
1840 1828
1841 DPRINTK("lec_arp_expire_arp\n"); 1829 DPRINTK("lec_arp_expire_arp\n");
1842 if (entry->status == ESI_ARP_PENDING) { 1830 if (entry->status == ESI_ARP_PENDING) {
1843 if (entry->no_tries <= entry->priv->max_retry_count) { 1831 if (entry->no_tries <= entry->priv->max_retry_count) {
1844 if (entry->is_rdesc) 1832 if (entry->is_rdesc)
1845 send_to_lecd(entry->priv, l_rdesc_arp_xmt, entry->mac_addr, NULL, NULL); 1833 send_to_lecd(entry->priv, l_rdesc_arp_xmt,
1846 else 1834 entry->mac_addr, NULL, NULL);
1847 send_to_lecd(entry->priv, l_arp_xmt, entry->mac_addr, NULL, NULL); 1835 else
1848 entry->no_tries++; 1836 send_to_lecd(entry->priv, l_arp_xmt,
1849 } 1837 entry->mac_addr, NULL, NULL);
1850 mod_timer(&entry->timer, jiffies + (1*HZ)); 1838 entry->no_tries++;
1851 } 1839 }
1840 mod_timer(&entry->timer, jiffies + (1 * HZ));
1841 }
1852} 1842}
1853 1843
-/*
- *
- * Unknown/unused vcc expire, remove associated entry
- *
- */
-static void
-lec_arp_expire_vcc(unsigned long data)
+/* Unknown/unused vcc expire, remove associated entry */
+static void lec_arp_expire_vcc(unsigned long data)
 {
 	unsigned long flags;
-	struct lec_arp_table *to_remove = (struct lec_arp_table*)data;
+	struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
 	struct lec_priv *priv = (struct lec_priv *)to_remove->priv;
-	struct lec_arp_table *entry = NULL;
 
 	del_timer(&to_remove->timer);
 
 	DPRINTK("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n",
 		to_remove, priv,
-		to_remove->vcc?to_remove->recv_vcc->vpi:0,
-		to_remove->vcc?to_remove->recv_vcc->vci:0);
-	DPRINTK("eo:%p nf:%p\n",priv->lec_arp_empty_ones,priv->lec_no_forward);
+		to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
+		to_remove->vcc ? to_remove->recv_vcc->vci : 0);
 
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
-	if (to_remove == priv->lec_arp_empty_ones)
-		priv->lec_arp_empty_ones = to_remove->next;
-	else {
-		entry = priv->lec_arp_empty_ones;
-		while (entry && entry->next != to_remove)
-			entry = entry->next;
-		if (entry)
-			entry->next = to_remove->next;
-	}
-	if (!entry) {
-		if (to_remove == priv->lec_no_forward) {
-			priv->lec_no_forward = to_remove->next;
-		} else {
-			entry = priv->lec_no_forward;
-			while (entry && entry->next != to_remove)
-				entry = entry->next;
-			if (entry)
-				entry->next = to_remove->next;
-		}
-	}
+	hlist_del(&to_remove->next);
 	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
 
 	lec_arp_clear_vccs(to_remove);
-	kfree(to_remove);
+	lec_arp_put(to_remove);
 }
1901 1865
1902/* 1866/*
@@ -1915,158 +1879,171 @@ lec_arp_expire_vcc(unsigned long data)
1915 * to ESI_FORWARD_DIRECT. This causes the flush period to end 1879 * to ESI_FORWARD_DIRECT. This causes the flush period to end
1916 * regardless of the progress of the flush protocol. 1880 * regardless of the progress of the flush protocol.
1917 */ 1881 */
1918static void 1882static void lec_arp_check_expire(void *data)
1919lec_arp_check_expire(unsigned long data)
1920{ 1883{
1921 unsigned long flags; 1884 unsigned long flags;
1922 struct lec_priv *priv = (struct lec_priv *)data; 1885 struct lec_priv *priv = data;
1923 struct lec_arp_table *entry, *next; 1886 struct hlist_node *node, *next;
1924 unsigned long now; 1887 struct lec_arp_table *entry;
1925 unsigned long time_to_check; 1888 unsigned long now;
1926 int i; 1889 unsigned long time_to_check;
1927 1890 int i;
1928 DPRINTK("lec_arp_check_expire %p\n",priv); 1891
1929 DPRINTK("expire: eo:%p nf:%p\n",priv->lec_arp_empty_ones, 1892 DPRINTK("lec_arp_check_expire %p\n", priv);
1930 priv->lec_no_forward);
1931 now = jiffies; 1893 now = jiffies;
1894restart:
1932 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1895 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1933 for(i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1896 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1934 for(entry = priv->lec_arp_tables[i]; entry != NULL; ) { 1897 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
1935 if ((entry->flags) & LEC_REMOTE_FLAG && 1898 if ((entry->flags) & LEC_REMOTE_FLAG &&
1936 priv->topology_change) 1899 priv->topology_change)
1937 time_to_check = priv->forward_delay_time; 1900 time_to_check = priv->forward_delay_time;
1938 else 1901 else
1939 time_to_check = priv->aging_time; 1902 time_to_check = priv->aging_time;
1940 1903
1941 DPRINTK("About to expire: %lx - %lx > %lx\n", 1904 DPRINTK("About to expire: %lx - %lx > %lx\n",
1942 now,entry->last_used, time_to_check); 1905 now, entry->last_used, time_to_check);
1943 if( time_after(now, entry->last_used+ 1906 if (time_after(now, entry->last_used + time_to_check)
1944 time_to_check) && 1907 && !(entry->flags & LEC_PERMANENT_FLAG)
1945 !(entry->flags & LEC_PERMANENT_FLAG) && 1908 && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
1946 !(entry->mac_addr[0] & 0x01) ) { /* LANE2: 7.1.20 */
1947 /* Remove entry */ 1909 /* Remove entry */
1948 DPRINTK("LEC:Entry timed out\n"); 1910 DPRINTK("LEC:Entry timed out\n");
1949 next = entry->next;
1950 lec_arp_remove(priv, entry); 1911 lec_arp_remove(priv, entry);
1951 kfree(entry); 1912 lec_arp_put(entry);
1952 entry = next;
1953 } else { 1913 } else {
1954 /* Something else */ 1914 /* Something else */
1955 if ((entry->status == ESI_VC_PENDING || 1915 if ((entry->status == ESI_VC_PENDING ||
1956 entry->status == ESI_ARP_PENDING) 1916 entry->status == ESI_ARP_PENDING)
1957 && time_after_eq(now, 1917 && time_after_eq(now,
1958 entry->timestamp + 1918 entry->timestamp +
1959 priv->max_unknown_frame_time)) { 1919 priv->
1920 max_unknown_frame_time)) {
1960 entry->timestamp = jiffies; 1921 entry->timestamp = jiffies;
1961 entry->packets_flooded = 0; 1922 entry->packets_flooded = 0;
1962 if (entry->status == ESI_VC_PENDING) 1923 if (entry->status == ESI_VC_PENDING)
1963 send_to_lecd(priv, l_svc_setup, entry->mac_addr, entry->atm_addr, NULL); 1924 send_to_lecd(priv, l_svc_setup,
1925 entry->mac_addr,
1926 entry->atm_addr,
1927 NULL);
1964 } 1928 }
1965 if (entry->status == ESI_FLUSH_PENDING 1929 if (entry->status == ESI_FLUSH_PENDING
1966 && 1930 &&
1967 time_after_eq(now, entry->timestamp+ 1931 time_after_eq(now, entry->timestamp +
1968 priv->path_switching_delay)) { 1932 priv->path_switching_delay)) {
1969 struct sk_buff *skb; 1933 struct sk_buff *skb;
1934 struct atm_vcc *vcc = entry->vcc;
1970 1935
1936 lec_arp_hold(entry);
1937 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1971 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 1938 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1972 lec_send(entry->vcc, skb, entry->priv); 1939 lec_send(vcc, skb, entry->priv);
1973 entry->last_used = jiffies; 1940 entry->last_used = jiffies;
1974 entry->status = 1941 entry->status = ESI_FORWARD_DIRECT;
1975 ESI_FORWARD_DIRECT; 1942 lec_arp_put(entry);
1943 goto restart;
1976 } 1944 }
1977 entry = entry->next;
1978 } 1945 }
1979 } 1946 }
1980 } 1947 }
1981 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 1948 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1982 1949
1983 mod_timer(&priv->lec_arp_timer, jiffies + LEC_ARP_REFRESH_INTERVAL); 1950 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
1984} 1951}
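lec_arp_check_expire() changes here from a timer callback taking an unsigned long to a work-queue function taking a void *, and it now re-arms itself with schedule_delayed_work() instead of mod_timer(). The matching setup and teardown live in lec_arp_init()/lec_arp_destroy(), outside the hunks shown; under the pre-2.6.20 workqueue API the conversion would presumably look something like the sketch below, where lec_arp_timer is the struct lec_priv field being replaced by lec_arp_work in the lec.h hunk further down:

#include <linux/workqueue.h>

/* lec_arp_init(): replaces init_timer()/add_timer() on priv->lec_arp_timer */
INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);

/* lec_arp_destroy(): replaces del_timer_sync(&priv->lec_arp_timer) */
cancel_rearming_delayed_work(&priv->lec_arp_work);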
1952
1985/* 1953/*
1986 * Try to find vcc where mac_address is attached. 1954 * Try to find vcc where mac_address is attached.
1987 * 1955 *
1988 */ 1956 */
1989static struct atm_vcc* 1957static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1990lec_arp_resolve(struct lec_priv *priv, unsigned char *mac_to_find, 1958 unsigned char *mac_to_find, int is_rdesc,
1991 int is_rdesc, struct lec_arp_table **ret_entry) 1959 struct lec_arp_table **ret_entry)
1992{ 1960{
1993 unsigned long flags; 1961 unsigned long flags;
1994 struct lec_arp_table *entry; 1962 struct lec_arp_table *entry;
1995 struct atm_vcc *found; 1963 struct atm_vcc *found;
1996 1964
1997 if (mac_to_find[0] & 0x01) { 1965 if (mac_to_find[0] & 0x01) {
1998 switch (priv->lane_version) { 1966 switch (priv->lane_version) {
1999 case 1: 1967 case 1:
2000 return priv->mcast_vcc; 1968 return priv->mcast_vcc;
2001 break; 1969 break;
2002 case 2: /* LANE2 wants arp for multicast addresses */ 1970 case 2: /* LANE2 wants arp for multicast addresses */
2003 if (!compare_ether_addr(mac_to_find, bus_mac)) 1971 if (!compare_ether_addr(mac_to_find, bus_mac))
2004 return priv->mcast_vcc; 1972 return priv->mcast_vcc;
2005 break; 1973 break;
2006 default: 1974 default:
2007 break; 1975 break;
2008 } 1976 }
2009 } 1977 }
2010 1978
2011 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1979 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2012 entry = lec_arp_find(priv, mac_to_find); 1980 entry = lec_arp_find(priv, mac_to_find);
2013 1981
2014 if (entry) { 1982 if (entry) {
2015 if (entry->status == ESI_FORWARD_DIRECT) { 1983 if (entry->status == ESI_FORWARD_DIRECT) {
2016 /* Connection Ok */ 1984 /* Connection Ok */
2017 entry->last_used = jiffies; 1985 entry->last_used = jiffies;
2018 *ret_entry = entry; 1986 lec_arp_hold(entry);
2019 found = entry->vcc; 1987 *ret_entry = entry;
1988 found = entry->vcc;
2020 goto out; 1989 goto out;
2021 } 1990 }
2022 /* If the LE_ARP cache entry is still pending, reset count to 0 1991 /*
1992 * If the LE_ARP cache entry is still pending, reset count to 0
2023 * so another LE_ARP request can be made for this frame. 1993 * so another LE_ARP request can be made for this frame.
2024 */ 1994 */
2025 if (entry->status == ESI_ARP_PENDING) { 1995 if (entry->status == ESI_ARP_PENDING) {
2026 entry->no_tries = 0; 1996 entry->no_tries = 0;
2027 } 1997 }
2028 /* Data direct VC not yet set up, check to see if the unknown 1998 /*
2029 frame count is greater than the limit. If the limit has 1999 * Data direct VC not yet set up, check to see if the unknown
2030 not been reached, allow the caller to send packet to 2000 * frame count is greater than the limit. If the limit has
2031 BUS. */ 2001 * not been reached, allow the caller to send packet to
2032 if (entry->status != ESI_FLUSH_PENDING && 2002 * BUS.
2033 entry->packets_flooded<priv->maximum_unknown_frame_count) { 2003 */
2034 entry->packets_flooded++; 2004 if (entry->status != ESI_FLUSH_PENDING &&
2035 DPRINTK("LEC_ARP: Flooding..\n"); 2005 entry->packets_flooded <
2036 found = priv->mcast_vcc; 2006 priv->maximum_unknown_frame_count) {
2007 entry->packets_flooded++;
2008 DPRINTK("LEC_ARP: Flooding..\n");
2009 found = priv->mcast_vcc;
2037 goto out; 2010 goto out;
2038 } 2011 }
2039 /* We got here because entry->status == ESI_FLUSH_PENDING 2012 /*
2013 * We got here because entry->status == ESI_FLUSH_PENDING
2040 * or BUS flood limit was reached for an entry which is 2014 * or BUS flood limit was reached for an entry which is
2041 * in ESI_ARP_PENDING or ESI_VC_PENDING state. 2015 * in ESI_ARP_PENDING or ESI_VC_PENDING state.
2042 */ 2016 */
2043 *ret_entry = entry; 2017 lec_arp_hold(entry);
2044 DPRINTK("lec: entry->status %d entry->vcc %p\n", entry->status, entry->vcc); 2018 *ret_entry = entry;
2045 found = NULL; 2019 DPRINTK("lec: entry->status %d entry->vcc %p\n", entry->status,
2046 } else { 2020 entry->vcc);
2047 /* No matching entry was found */ 2021 found = NULL;
2048 entry = make_entry(priv, mac_to_find); 2022 } else {
2049 DPRINTK("LEC_ARP: Making entry\n"); 2023 /* No matching entry was found */
2050 if (!entry) { 2024 entry = make_entry(priv, mac_to_find);
2051 found = priv->mcast_vcc; 2025 DPRINTK("LEC_ARP: Making entry\n");
2026 if (!entry) {
2027 found = priv->mcast_vcc;
2052 goto out; 2028 goto out;
2053 } 2029 }
2054 lec_arp_add(priv, entry); 2030 lec_arp_add(priv, entry);
2055 /* We want arp-request(s) to be sent */ 2031 /* We want arp-request(s) to be sent */
2056 entry->packets_flooded =1; 2032 entry->packets_flooded = 1;
2057 entry->status = ESI_ARP_PENDING; 2033 entry->status = ESI_ARP_PENDING;
2058 entry->no_tries = 1; 2034 entry->no_tries = 1;
2059 entry->last_used = entry->timestamp = jiffies; 2035 entry->last_used = entry->timestamp = jiffies;
2060 entry->is_rdesc = is_rdesc; 2036 entry->is_rdesc = is_rdesc;
2061 if (entry->is_rdesc) 2037 if (entry->is_rdesc)
2062 send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL, NULL); 2038 send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL,
2063 else 2039 NULL);
2064 send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL); 2040 else
2065 entry->timer.expires = jiffies + (1*HZ); 2041 send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
2066 entry->timer.function = lec_arp_expire_arp; 2042 entry->timer.expires = jiffies + (1 * HZ);
2067 add_timer(&entry->timer); 2043 entry->timer.function = lec_arp_expire_arp;
2068 found = priv->mcast_vcc; 2044 add_timer(&entry->timer);
2069 } 2045 found = priv->mcast_vcc;
2046 }
2070 2047
2071out: 2048out:
2072 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2049 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2074,30 +2051,30 @@ out:
2074} 2051}
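With this change lec_arp_resolve() returns *ret_entry with its usage count raised (note the lec_arp_hold() calls before both assignments above), so whichever caller receives the entry is expected to drop that reference once it has sent or queued the frame. The caller is lec_start_xmit(), which this patch updates outside the hunks shown; the expected caller-side pattern is roughly the following sketch, with the surrounding transmit logic elided:

	struct lec_arp_table *entry = NULL;
	struct atm_vcc *vcc;

	vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
	/* ... send via vcc, or queue the skb on entry->tx_wait while pending ... */
	if (entry)
		lec_arp_put(entry);	/* balance the hold taken inside lec_arp_resolve() */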
2075 2052
2076static int 2053static int
2077lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, 2054lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
2078 unsigned long permanent) 2055 unsigned long permanent)
2079{ 2056{
2080 unsigned long flags; 2057 unsigned long flags;
2081 struct lec_arp_table *entry, *next; 2058 struct hlist_node *node, *next;
2082 int i; 2059 struct lec_arp_table *entry;
2060 int i;
2083 2061
2084 DPRINTK("lec_addr_delete\n"); 2062 DPRINTK("lec_addr_delete\n");
2085 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2063 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2086 for(i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2064 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2087 for(entry = priv->lec_arp_tables[i]; entry != NULL; entry = next) { 2065 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
2088 next = entry->next; 2066 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)
2089 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) 2067 && (permanent ||
2090 && (permanent || 2068 !(entry->flags & LEC_PERMANENT_FLAG))) {
2091 !(entry->flags & LEC_PERMANENT_FLAG))) {
2092 lec_arp_remove(priv, entry); 2069 lec_arp_remove(priv, entry);
2093 kfree(entry); 2070 lec_arp_put(entry);
2094 } 2071 }
2095 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2072 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2096 return 0; 2073 return 0;
2097 } 2074 }
2098 } 2075 }
2099 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2076 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2100 return -1; 2077 return -1;
2101} 2078}
2102 2079
2103/* 2080/*
@@ -2105,109 +2082,98 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
2105 */ 2082 */
2106static void 2083static void
2107lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, 2084lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr,
2108 unsigned char *atm_addr, unsigned long remoteflag, 2085 unsigned char *atm_addr, unsigned long remoteflag,
2109 unsigned int targetless_le_arp) 2086 unsigned int targetless_le_arp)
2110{ 2087{
2111 unsigned long flags; 2088 unsigned long flags;
2112 struct lec_arp_table *entry, *tmp; 2089 struct hlist_node *node, *next;
2113 int i; 2090 struct lec_arp_table *entry, *tmp;
2091 int i;
2114 2092
2115 DPRINTK("lec:%s", (targetless_le_arp) ? "targetless ": " "); 2093 DPRINTK("lec:%s", (targetless_le_arp) ? "targetless " : " ");
2116 DPRINTK("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2094 DPRINTK("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2117 mac_addr[0],mac_addr[1],mac_addr[2],mac_addr[3], 2095 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2118 mac_addr[4],mac_addr[5]); 2096 mac_addr[4], mac_addr[5]);
2119 2097
2120 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2098 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2121 entry = lec_arp_find(priv, mac_addr); 2099 entry = lec_arp_find(priv, mac_addr);
2122 if (entry == NULL && targetless_le_arp) 2100 if (entry == NULL && targetless_le_arp)
2123 goto out; /* LANE2: ignore targetless LE_ARPs for which 2101 goto out; /*
2124 * we have no entry in the cache. 7.1.30 2102 * LANE2: ignore targetless LE_ARPs for which
2125 */ 2103 * we have no entry in the cache. 7.1.30
2126 if (priv->lec_arp_empty_ones) { 2104 */
2127 entry = priv->lec_arp_empty_ones; 2105 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
2128 if (!memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN)) { 2106 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
2129 priv->lec_arp_empty_ones = entry->next; 2107 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
2130 } else { 2108 hlist_del(&entry->next);
2131 while(entry->next && memcmp(entry->next->atm_addr, 2109 del_timer(&entry->timer);
2132 atm_addr, ATM_ESA_LEN)) 2110 tmp = lec_arp_find(priv, mac_addr);
2133 entry = entry->next; 2111 if (tmp) {
2134 if (entry->next) { 2112 del_timer(&tmp->timer);
2135 tmp = entry; 2113 tmp->status = ESI_FORWARD_DIRECT;
2136 entry = entry->next; 2114 memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN);
2137 tmp->next = entry->next; 2115 tmp->vcc = entry->vcc;
2138 } else 2116 tmp->old_push = entry->old_push;
2139 entry = NULL; 2117 tmp->last_used = jiffies;
2140 2118 del_timer(&entry->timer);
2141 } 2119 lec_arp_put(entry);
2142 if (entry) { 2120 entry = tmp;
2143 del_timer(&entry->timer); 2121 } else {
2144 tmp = lec_arp_find(priv, mac_addr); 2122 entry->status = ESI_FORWARD_DIRECT;
2145 if (tmp) { 2123 memcpy(entry->mac_addr, mac_addr, ETH_ALEN);
2146 del_timer(&tmp->timer); 2124 entry->last_used = jiffies;
2147 tmp->status = ESI_FORWARD_DIRECT; 2125 lec_arp_add(priv, entry);
2148 memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); 2126 }
2149 tmp->vcc = entry->vcc; 2127 if (remoteflag)
2150 tmp->old_push = entry->old_push; 2128 entry->flags |= LEC_REMOTE_FLAG;
2151 tmp->last_used = jiffies; 2129 else
2152 del_timer(&entry->timer); 2130 entry->flags &= ~LEC_REMOTE_FLAG;
2153 kfree(entry); 2131 DPRINTK("After update\n");
2154 entry=tmp; 2132 dump_arp_table(priv);
2155 } else { 2133 goto out;
2156 entry->status = ESI_FORWARD_DIRECT; 2134 }
2157 memcpy(entry->mac_addr, mac_addr, ETH_ALEN); 2135 }
2158 entry->last_used = jiffies; 2136 }
2159 lec_arp_add(priv, entry); 2137
2160 } 2138 entry = lec_arp_find(priv, mac_addr);
2161 if (remoteflag) 2139 if (!entry) {
2162 entry->flags|=LEC_REMOTE_FLAG; 2140 entry = make_entry(priv, mac_addr);
2163 else 2141 if (!entry)
2164 entry->flags&=~LEC_REMOTE_FLAG;
2165 DPRINTK("After update\n");
2166 dump_arp_table(priv);
2167 goto out;
2168 }
2169 }
2170 entry = lec_arp_find(priv, mac_addr);
2171 if (!entry) {
2172 entry = make_entry(priv, mac_addr);
2173 if (!entry)
2174 goto out; 2142 goto out;
2175 entry->status = ESI_UNKNOWN; 2143 entry->status = ESI_UNKNOWN;
2176 lec_arp_add(priv, entry); 2144 lec_arp_add(priv, entry);
2177 /* Temporary, changes before end of function */ 2145 /* Temporary, changes before end of function */
2178 } 2146 }
2179 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 2147 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
2180 del_timer(&entry->timer); 2148 del_timer(&entry->timer);
2181 for(i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2149 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2182 for(tmp = priv->lec_arp_tables[i]; tmp; tmp=tmp->next) { 2150 hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) {
2183 if (entry != tmp && 2151 if (entry != tmp &&
2184 !memcmp(tmp->atm_addr, atm_addr, 2152 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
2185 ATM_ESA_LEN)) { 2153 /* Vcc to this host exists */
2186 /* Vcc to this host exists */ 2154 if (tmp->status > ESI_VC_PENDING) {
2187 if (tmp->status > ESI_VC_PENDING) { 2155 /*
2188 /* 2156 * ESI_FLUSH_PENDING,
2189 * ESI_FLUSH_PENDING, 2157 * ESI_FORWARD_DIRECT
2190 * ESI_FORWARD_DIRECT 2158 */
2191 */ 2159 entry->vcc = tmp->vcc;
2192 entry->vcc = tmp->vcc; 2160 entry->old_push = tmp->old_push;
2193 entry->old_push=tmp->old_push; 2161 }
2194 } 2162 entry->status = tmp->status;
2195 entry->status=tmp->status; 2163 break;
2196 break; 2164 }
2197 } 2165 }
2198 } 2166 }
2199 } 2167 if (remoteflag)
2200 if (remoteflag) 2168 entry->flags |= LEC_REMOTE_FLAG;
2201 entry->flags|=LEC_REMOTE_FLAG; 2169 else
2202 else 2170 entry->flags &= ~LEC_REMOTE_FLAG;
2203 entry->flags&=~LEC_REMOTE_FLAG; 2171 if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) {
2204 if (entry->status == ESI_ARP_PENDING || 2172 entry->status = ESI_VC_PENDING;
2205 entry->status == ESI_UNKNOWN) { 2173 send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL);
2206 entry->status = ESI_VC_PENDING; 2174 }
2207 send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); 2175 DPRINTK("After update2\n");
2208 } 2176 dump_arp_table(priv);
2209 DPRINTK("After update2\n");
2210 dump_arp_table(priv);
2211out: 2177out:
2212 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2178 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2213} 2179}
@@ -2217,299 +2183,299 @@ out:
2217 */ 2183 */
2218static void 2184static void
2219lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, 2185lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
2220 struct atm_vcc *vcc, 2186 struct atm_vcc *vcc,
2221 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb)) 2187 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
2222{ 2188{
2223 unsigned long flags; 2189 unsigned long flags;
2224 struct lec_arp_table *entry; 2190 struct hlist_node *node;
2225 int i, found_entry=0; 2191 struct lec_arp_table *entry;
2192 int i, found_entry = 0;
2226 2193
2227 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2194 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2228 if (ioc_data->receive == 2) { 2195 if (ioc_data->receive == 2) {
2229 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */ 2196 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2230 2197
2231 DPRINTK("LEC_ARP: Attaching mcast forward\n"); 2198 DPRINTK("LEC_ARP: Attaching mcast forward\n");
2232#if 0 2199#if 0
2233 entry = lec_arp_find(priv, bus_mac); 2200 entry = lec_arp_find(priv, bus_mac);
2234 if (!entry) { 2201 if (!entry) {
2235 printk("LEC_ARP: Multicast entry not found!\n"); 2202 printk("LEC_ARP: Multicast entry not found!\n");
2236 goto out; 2203 goto out;
2237 } 2204 }
2238 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2205 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
2239 entry->recv_vcc = vcc; 2206 entry->recv_vcc = vcc;
2240 entry->old_recv_push = old_push; 2207 entry->old_recv_push = old_push;
2241#endif 2208#endif
2242 entry = make_entry(priv, bus_mac); 2209 entry = make_entry(priv, bus_mac);
2243 if (entry == NULL) 2210 if (entry == NULL)
2244 goto out; 2211 goto out;
2245 del_timer(&entry->timer); 2212 del_timer(&entry->timer);
2246 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2213 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
2247 entry->recv_vcc = vcc; 2214 entry->recv_vcc = vcc;
2248 entry->old_recv_push = old_push; 2215 entry->old_recv_push = old_push;
2249 entry->next = priv->mcast_fwds; 2216 hlist_add_head(&entry->next, &priv->mcast_fwds);
2250 priv->mcast_fwds = entry; 2217 goto out;
2251 goto out; 2218 } else if (ioc_data->receive == 1) {
2252 } else if (ioc_data->receive == 1) { 2219 /*
2253 /* Vcc which we don't want to make default vcc, attach it 2220 * Vcc which we don't want to make default vcc,
2254 anyway. */ 2221 * attach it anyway.
2255 DPRINTK("LEC_ARP:Attaching data direct, not default :%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2222 */
2256 ioc_data->atm_addr[0],ioc_data->atm_addr[1], 2223 DPRINTK
2257 ioc_data->atm_addr[2],ioc_data->atm_addr[3], 2224 ("LEC_ARP:Attaching data direct, not default: "
2258 ioc_data->atm_addr[4],ioc_data->atm_addr[5], 2225 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2259 ioc_data->atm_addr[6],ioc_data->atm_addr[7], 2226 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2260 ioc_data->atm_addr[8],ioc_data->atm_addr[9], 2227 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2261 ioc_data->atm_addr[10],ioc_data->atm_addr[11], 2228 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2262 ioc_data->atm_addr[12],ioc_data->atm_addr[13], 2229 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2263 ioc_data->atm_addr[14],ioc_data->atm_addr[15], 2230 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2264 ioc_data->atm_addr[16],ioc_data->atm_addr[17], 2231 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2265 ioc_data->atm_addr[18],ioc_data->atm_addr[19]); 2232 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2266 entry = make_entry(priv, bus_mac); 2233 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2267 if (entry == NULL) 2234 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2235 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2236 entry = make_entry(priv, bus_mac);
2237 if (entry == NULL)
2268 goto out; 2238 goto out;
2269 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2239 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
2270 memset(entry->mac_addr, 0, ETH_ALEN); 2240 memset(entry->mac_addr, 0, ETH_ALEN);
2271 entry->recv_vcc = vcc; 2241 entry->recv_vcc = vcc;
2272 entry->old_recv_push = old_push; 2242 entry->old_recv_push = old_push;
2273 entry->status = ESI_UNKNOWN; 2243 entry->status = ESI_UNKNOWN;
2274 entry->timer.expires = jiffies + priv->vcc_timeout_period; 2244 entry->timer.expires = jiffies + priv->vcc_timeout_period;
2275 entry->timer.function = lec_arp_expire_vcc; 2245 entry->timer.function = lec_arp_expire_vcc;
2276 add_timer(&entry->timer); 2246 hlist_add_head(&entry->next, &priv->lec_no_forward);
2277 entry->next = priv->lec_no_forward; 2247 add_timer(&entry->timer);
2278 priv->lec_no_forward = entry;
2279 dump_arp_table(priv); 2248 dump_arp_table(priv);
2280 goto out; 2249 goto out;
2281 } 2250 }
2282 DPRINTK("LEC_ARP:Attaching data direct, default:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2251 DPRINTK
2283 ioc_data->atm_addr[0],ioc_data->atm_addr[1], 2252 ("LEC_ARP:Attaching data direct, default: "
2284 ioc_data->atm_addr[2],ioc_data->atm_addr[3], 2253 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2285 ioc_data->atm_addr[4],ioc_data->atm_addr[5], 2254 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2286 ioc_data->atm_addr[6],ioc_data->atm_addr[7], 2255 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2287 ioc_data->atm_addr[8],ioc_data->atm_addr[9], 2256 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2288 ioc_data->atm_addr[10],ioc_data->atm_addr[11], 2257 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2289 ioc_data->atm_addr[12],ioc_data->atm_addr[13], 2258 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2290 ioc_data->atm_addr[14],ioc_data->atm_addr[15], 2259 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2291 ioc_data->atm_addr[16],ioc_data->atm_addr[17], 2260 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2292 ioc_data->atm_addr[18],ioc_data->atm_addr[19]); 2261 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2293 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2262 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2294 for (entry = priv->lec_arp_tables[i]; entry; entry=entry->next) { 2263 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2295 if (memcmp(ioc_data->atm_addr, entry->atm_addr, 2264 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2296 ATM_ESA_LEN)==0) { 2265 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
2297 DPRINTK("LEC_ARP: Attaching data direct\n"); 2266 if (memcmp
2298 DPRINTK("Currently -> Vcc: %d, Rvcc:%d\n", 2267 (ioc_data->atm_addr, entry->atm_addr,
2299 entry->vcc?entry->vcc->vci:0, 2268 ATM_ESA_LEN) == 0) {
2300 entry->recv_vcc?entry->recv_vcc->vci:0); 2269 DPRINTK("LEC_ARP: Attaching data direct\n");
2301 found_entry=1; 2270 DPRINTK("Currently -> Vcc: %d, Rvcc:%d\n",
2302 del_timer(&entry->timer); 2271 entry->vcc ? entry->vcc->vci : 0,
2303 entry->vcc = vcc; 2272 entry->recv_vcc ? entry->recv_vcc->
2304 entry->old_push = old_push; 2273 vci : 0);
2305 if (entry->status == ESI_VC_PENDING) { 2274 found_entry = 1;
2306 if(priv->maximum_unknown_frame_count 2275 del_timer(&entry->timer);
2307 ==0) 2276 entry->vcc = vcc;
2308 entry->status = 2277 entry->old_push = old_push;
2309 ESI_FORWARD_DIRECT; 2278 if (entry->status == ESI_VC_PENDING) {
2310 else { 2279 if (priv->maximum_unknown_frame_count
2311 entry->timestamp = jiffies; 2280 == 0)
2312 entry->status = 2281 entry->status =
2313 ESI_FLUSH_PENDING; 2282 ESI_FORWARD_DIRECT;
2283 else {
2284 entry->timestamp = jiffies;
2285 entry->status =
2286 ESI_FLUSH_PENDING;
2314#if 0 2287#if 0
2315 send_to_lecd(priv,l_flush_xmt, 2288 send_to_lecd(priv, l_flush_xmt,
2316 NULL, 2289 NULL,
2317 entry->atm_addr, 2290 entry->atm_addr,
2318 NULL); 2291 NULL);
2319#endif 2292#endif
2320 } 2293 }
2321 } else { 2294 } else {
2322 /* They were forming a connection 2295 /*
2323 to us, and we to them. Our 2296 * They were forming a connection
2324 ATM address is numerically lower 2297 * to us, and we to them. Our
2325 than theirs, so we make connection 2298 * ATM address is numerically lower
2326 we formed into default VCC (8.1.11). 2299 * than theirs, so we make connection
2327 Connection they made gets torn 2300 * we formed into default VCC (8.1.11).
2328 down. This might confuse some 2301 * Connection they made gets torn
2329 clients. Can be changed if 2302 * down. This might confuse some
2330 someone reports trouble... */ 2303 * clients. Can be changed if
2331 ; 2304 * someone reports trouble...
2332 } 2305 */
2333 } 2306 ;
2334 } 2307 }
2335 } 2308 }
2336 if (found_entry) { 2309 }
2337 DPRINTK("After vcc was added\n"); 2310 }
2338 dump_arp_table(priv); 2311 if (found_entry) {
2312 DPRINTK("After vcc was added\n");
2313 dump_arp_table(priv);
2339 goto out; 2314 goto out;
2340 } 2315 }
2341 /* Not found, snatch address from first data packet that arrives from 2316 /*
2342 this vcc */ 2317 * Not found, snatch address from first data packet that arrives
2343 entry = make_entry(priv, bus_mac); 2318 * from this vcc
2344 if (!entry) 2319 */
2320 entry = make_entry(priv, bus_mac);
2321 if (!entry)
2345 goto out; 2322 goto out;
2346 entry->vcc = vcc; 2323 entry->vcc = vcc;
2347 entry->old_push = old_push; 2324 entry->old_push = old_push;
2348 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2325 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
2349 memset(entry->mac_addr, 0, ETH_ALEN); 2326 memset(entry->mac_addr, 0, ETH_ALEN);
2350 entry->status = ESI_UNKNOWN; 2327 entry->status = ESI_UNKNOWN;
2351 entry->next = priv->lec_arp_empty_ones; 2328 hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
2352 priv->lec_arp_empty_ones = entry; 2329 entry->timer.expires = jiffies + priv->vcc_timeout_period;
2353 entry->timer.expires = jiffies + priv->vcc_timeout_period; 2330 entry->timer.function = lec_arp_expire_vcc;
2354 entry->timer.function = lec_arp_expire_vcc; 2331 add_timer(&entry->timer);
2355 add_timer(&entry->timer); 2332 DPRINTK("After vcc was added\n");
2356 DPRINTK("After vcc was added\n");
2357 dump_arp_table(priv); 2333 dump_arp_table(priv);
2358out: 2334out:
2359 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2335 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2360} 2336}
2361 2337
-static void
-lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
+static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 {
 	unsigned long flags;
+	struct hlist_node *node;
 	struct lec_arp_table *entry;
 	int i;
 
-	DPRINTK("LEC:lec_flush_complete %lx\n",tran_id);
+	DPRINTK("LEC:lec_flush_complete %lx\n", tran_id);
+restart:
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-		for (entry = priv->lec_arp_tables[i]; entry; entry=entry->next) {
-			if (entry->flush_tran_id == tran_id &&
-			    entry->status == ESI_FLUSH_PENDING) {
+		hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
+			if (entry->flush_tran_id == tran_id
+			    && entry->status == ESI_FLUSH_PENDING) {
 				struct sk_buff *skb;
+				struct atm_vcc *vcc = entry->vcc;
 
+				lec_arp_hold(entry);
+				spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
 				while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
-					lec_send(entry->vcc, skb, entry->priv);
+					lec_send(vcc, skb, entry->priv);
+				entry->last_used = jiffies;
 				entry->status = ESI_FORWARD_DIRECT;
+				lec_arp_put(entry);
 				DPRINTK("LEC_ARP: Flushed\n");
+				goto restart;
 			}
 		}
 	}
 	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
 	dump_arp_table(priv);
 }
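Both lec_flush_complete() above and the ESI_FLUSH_PENDING branch of lec_arp_check_expire() now follow the same idiom: take a reference with lec_arp_hold(), release lec_arp_lock before draining tx_wait through lec_send(), and then jump back to a restart label because the hash lists may have changed while the lock was dropped. Stripped to its skeleton (needs_flush() and drain_tx_wait() are stand-ins for the real tests and the lec_send() loop), the pattern is roughly:

restart:
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	hlist_for_each_entry(entry, node, head, next) {
		if (needs_flush(entry)) {
			lec_arp_hold(entry);	/* keep the entry alive across the unlock */
			spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
			drain_tx_wait(entry);	/* lec_send() can now run without the lock */
			lec_arp_put(entry);
			goto restart;		/* list may have changed; rescan from the top */
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);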
2387 2370
2388static void 2371static void
2389lec_set_flush_tran_id(struct lec_priv *priv, 2372lec_set_flush_tran_id(struct lec_priv *priv,
2390 unsigned char *atm_addr, unsigned long tran_id) 2373 unsigned char *atm_addr, unsigned long tran_id)
2391{ 2374{
2392 unsigned long flags; 2375 unsigned long flags;
2393 struct lec_arp_table *entry; 2376 struct hlist_node *node;
2394 int i; 2377 struct lec_arp_table *entry;
2378 int i;
2395 2379
2396 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2380 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2397 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2381 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2398 for(entry = priv->lec_arp_tables[i]; entry; entry=entry->next) 2382 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
2399 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2383 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2400 entry->flush_tran_id = tran_id; 2384 entry->flush_tran_id = tran_id;
2401 DPRINTK("Set flush transaction id to %lx for %p\n",tran_id,entry); 2385 DPRINTK("Set flush transaction id to %lx for %p\n",
2402 } 2386 tran_id, entry);
2387 }
2388 }
2403 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2389 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2404} 2390}
2405 2391
2406static int 2392static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2407lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2408{ 2393{
2409 unsigned long flags; 2394 unsigned long flags;
2410 unsigned char mac_addr[] = { 2395 unsigned char mac_addr[] = {
2411 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2396 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2412 struct lec_arp_table *to_add; 2397 };
2398 struct lec_arp_table *to_add;
2413 struct lec_vcc_priv *vpriv; 2399 struct lec_vcc_priv *vpriv;
2414 int err = 0; 2400 int err = 0;
2415 2401
2416 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 2402 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL)))
2417 return -ENOMEM; 2403 return -ENOMEM;
2418 vpriv->xoff = 0; 2404 vpriv->xoff = 0;
2419 vpriv->old_pop = vcc->pop; 2405 vpriv->old_pop = vcc->pop;
2420 vcc->user_back = vpriv; 2406 vcc->user_back = vpriv;
2421 vcc->pop = lec_pop; 2407 vcc->pop = lec_pop;
2422 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2408 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2423 to_add = make_entry(priv, mac_addr); 2409 to_add = make_entry(priv, mac_addr);
2424 if (!to_add) { 2410 if (!to_add) {
2425 vcc->pop = vpriv->old_pop; 2411 vcc->pop = vpriv->old_pop;
2426 kfree(vpriv); 2412 kfree(vpriv);
2427 err = -ENOMEM; 2413 err = -ENOMEM;
2428 goto out; 2414 goto out;
2429 } 2415 }
2430 memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN); 2416 memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN);
2431 to_add->status = ESI_FORWARD_DIRECT; 2417 to_add->status = ESI_FORWARD_DIRECT;
2432 to_add->flags |= LEC_PERMANENT_FLAG; 2418 to_add->flags |= LEC_PERMANENT_FLAG;
2433 to_add->vcc = vcc; 2419 to_add->vcc = vcc;
2434 to_add->old_push = vcc->push; 2420 to_add->old_push = vcc->push;
2435 vcc->push = lec_push; 2421 vcc->push = lec_push;
2436 priv->mcast_vcc = vcc; 2422 priv->mcast_vcc = vcc;
2437 lec_arp_add(priv, to_add); 2423 lec_arp_add(priv, to_add);
2438out: 2424out:
2439 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2425 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2440 return err; 2426 return err;
2441} 2427}
2442 2428
-static void
-lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
+static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 {
 	unsigned long flags;
-	struct lec_arp_table *entry, *next;
+	struct hlist_node *node, *next;
+	struct lec_arp_table *entry;
 	int i;
 
-	DPRINTK("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n",vcc->vpi,vcc->vci);
+	DPRINTK("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n", vcc->vpi, vcc->vci);
 	dump_arp_table(priv);
+
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
-	for(i=0;i<LEC_ARP_TABLE_SIZE;i++) {
-		for(entry = priv->lec_arp_tables[i];entry; entry=next) {
-			next = entry->next;
+
+	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
+		hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
 			if (vcc == entry->vcc) {
 				lec_arp_remove(priv, entry);
-				kfree(entry);
+				lec_arp_put(entry);
 				if (priv->mcast_vcc == vcc) {
 					priv->mcast_vcc = NULL;
 				}
 			}
 		}
 	}
 
-	entry = priv->lec_arp_empty_ones;
-	priv->lec_arp_empty_ones = NULL;
-	while (entry != NULL) {
-		next = entry->next;
-		if (entry->vcc == vcc) { /* leave it out from the list */
-			lec_arp_clear_vccs(entry);
-			del_timer(&entry->timer);
-			kfree(entry);
-		}
-		else { /* put it back to the list */
-			entry->next = priv->lec_arp_empty_ones;
-			priv->lec_arp_empty_ones = entry;
-		}
-		entry = next;
-	}
+	hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+		if (entry->vcc == vcc) {
+			lec_arp_clear_vccs(entry);
+			del_timer(&entry->timer);
+			hlist_del(&entry->next);
+			lec_arp_put(entry);
+		}
+	}
 
-	entry = priv->lec_no_forward;
-	priv->lec_no_forward = NULL;
-	while (entry != NULL) {
-		next = entry->next;
-		if (entry->recv_vcc == vcc) {
-			lec_arp_clear_vccs(entry);
-			del_timer(&entry->timer);
-			kfree(entry);
-		}
-		else {
-			entry->next = priv->lec_no_forward;
-			priv->lec_no_forward = entry;
-		}
-		entry = next;
-	}
+	hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) {
+		if (entry->recv_vcc == vcc) {
+			lec_arp_clear_vccs(entry);
+			del_timer(&entry->timer);
+			hlist_del(&entry->next);
+			lec_arp_put(entry);
+		}
+	}
 
-	entry = priv->mcast_fwds;
-	priv->mcast_fwds = NULL;
-	while (entry != NULL) {
-		next = entry->next;
-		if (entry->recv_vcc == vcc) {
-			lec_arp_clear_vccs(entry);
-			/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
-			kfree(entry);
-		}
-		else {
-			entry->next = priv->mcast_fwds;
-			priv->mcast_fwds = entry;
-		}
-		entry = next;
-	}
+	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+		if (entry->recv_vcc == vcc) {
+			lec_arp_clear_vccs(entry);
+			/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
+			hlist_del(&entry->next);
+			lec_arp_put(entry);
+		}
+	}
2513 2479
2514 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2480 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2515 dump_arp_table(priv); 2481 dump_arp_table(priv);
@@ -2517,57 +2483,42 @@ lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2517 2483
2518static void 2484static void
2519lec_arp_check_empties(struct lec_priv *priv, 2485lec_arp_check_empties(struct lec_priv *priv,
2520 struct atm_vcc *vcc, struct sk_buff *skb) 2486 struct atm_vcc *vcc, struct sk_buff *skb)
2521{ 2487{
2522 unsigned long flags; 2488 unsigned long flags;
-	struct lec_arp_table *entry, *prev;
+	struct hlist_node *node, *next;
+	struct lec_arp_table *entry, *tmp;
 	struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
 	unsigned char *src;
 #ifdef CONFIG_TR
 	struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;
 
-	if (priv->is_trdev) src = tr_hdr->h_source;
-	else
+	if (priv->is_trdev)
+		src = tr_hdr->h_source;
+	else
 #endif
 		src = hdr->h_source;
 
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
-	entry = priv->lec_arp_empty_ones;
-	if (vcc == entry->vcc) {
-		del_timer(&entry->timer);
-		memcpy(entry->mac_addr, src, ETH_ALEN);
-		entry->status = ESI_FORWARD_DIRECT;
-		entry->last_used = jiffies;
-		priv->lec_arp_empty_ones = entry->next;
-		/* We might have got an entry */
-		if ((prev = lec_arp_find(priv,src))) {
-			lec_arp_remove(priv, prev);
-			kfree(prev);
-		}
-		lec_arp_add(priv, entry);
-		goto out;
-	}
-	prev = entry;
-	entry = entry->next;
-	while (entry && entry->vcc != vcc) {
-		prev= entry;
-		entry = entry->next;
-	}
-	if (!entry) {
-		DPRINTK("LEC_ARP: Arp_check_empties: entry not found!\n");
-		goto out;
-	}
-	del_timer(&entry->timer);
-	memcpy(entry->mac_addr, src, ETH_ALEN);
-	entry->status = ESI_FORWARD_DIRECT;
-	entry->last_used = jiffies;
-	prev->next = entry->next;
-	if ((prev = lec_arp_find(priv, src))) {
-		lec_arp_remove(priv, prev);
-		kfree(prev);
-	}
-	lec_arp_add(priv, entry);
+	hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+		if (vcc == entry->vcc) {
+			del_timer(&entry->timer);
+			memcpy(entry->mac_addr, src, ETH_ALEN);
+			entry->status = ESI_FORWARD_DIRECT;
+			entry->last_used = jiffies;
+			/* We might have got an entry */
+			if ((tmp = lec_arp_find(priv, src))) {
+				lec_arp_remove(priv, tmp);
+				lec_arp_put(tmp);
+			}
+			hlist_del(&entry->next);
+			lec_arp_add(priv, entry);
+			goto out;
+		}
+	}
+	DPRINTK("LEC_ARP: Arp_check_empties: entry not found!\n");
2570out: 2520out:
2571 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2521 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
2572} 2522}
2523
2573MODULE_LICENSE("GPL"); 2524MODULE_LICENSE("GPL");
diff --git a/net/atm/lec.h b/net/atm/lec.h
index c22a8bfa1f81..877f50939696 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -1,9 +1,7 @@
1/* 1/*
2 *
3 * Lan Emulation client header file 2 * Lan Emulation client header file
4 * 3 *
5 * Marko Kiiskila mkiiskila@yahoo.com 4 * Marko Kiiskila <mkiiskila@yahoo.com>
6 *
7 */ 5 */
8 6
9#ifndef _LEC_H_ 7#ifndef _LEC_H_
@@ -16,18 +14,18 @@
16#define LEC_HEADER_LEN 16 14#define LEC_HEADER_LEN 16
17 15
18struct lecdatahdr_8023 { 16struct lecdatahdr_8023 {
19 unsigned short le_header; 17 unsigned short le_header;
20 unsigned char h_dest[ETH_ALEN]; 18 unsigned char h_dest[ETH_ALEN];
21 unsigned char h_source[ETH_ALEN]; 19 unsigned char h_source[ETH_ALEN];
22 unsigned short h_type; 20 unsigned short h_type;
23}; 21};
24 22
25struct lecdatahdr_8025 { 23struct lecdatahdr_8025 {
26 unsigned short le_header; 24 unsigned short le_header;
27 unsigned char ac_pad; 25 unsigned char ac_pad;
28 unsigned char fc; 26 unsigned char fc;
29 unsigned char h_dest[ETH_ALEN]; 27 unsigned char h_dest[ETH_ALEN];
30 unsigned char h_source[ETH_ALEN]; 28 unsigned char h_source[ETH_ALEN];
31}; 29};
32 30
33#define LEC_MINIMUM_8023_SIZE 62 31#define LEC_MINIMUM_8023_SIZE 62
@@ -44,17 +42,18 @@ struct lecdatahdr_8025 {
44 * 42 *
45 */ 43 */
46struct lane2_ops { 44struct lane2_ops {
47 int (*resolve)(struct net_device *dev, u8 *dst_mac, int force, 45 int (*resolve) (struct net_device *dev, u8 *dst_mac, int force,
48 u8 **tlvs, u32 *sizeoftlvs); 46 u8 **tlvs, u32 *sizeoftlvs);
49 int (*associate_req)(struct net_device *dev, u8 *lan_dst, 47 int (*associate_req) (struct net_device *dev, u8 *lan_dst,
50 u8 *tlvs, u32 sizeoftlvs); 48 u8 *tlvs, u32 sizeoftlvs);
51 void (*associate_indicator)(struct net_device *dev, u8 *mac_addr, 49 void (*associate_indicator) (struct net_device *dev, u8 *mac_addr,
52 u8 *tlvs, u32 sizeoftlvs); 50 u8 *tlvs, u32 sizeoftlvs);
53}; 51};
54 52
55/* 53/*
56 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType 54 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
57 * frames. 55 * frames.
56 *
58 * 1. Dix Ethernet EtherType frames encoded by placing EtherType 57 * 1. Dix Ethernet EtherType frames encoded by placing EtherType
59 * field in h_type field. Data follows immediatelly after header. 58 * field in h_type field. Data follows immediatelly after header.
60 * 2. LLC Data frames whose total length, including LLC field and data, 59 * 2. LLC Data frames whose total length, including LLC field and data,
@@ -70,72 +69,88 @@ struct lane2_ops {
70#define LEC_ARP_TABLE_SIZE 16 69#define LEC_ARP_TABLE_SIZE 16
71 70
72struct lec_priv { 71struct lec_priv {
73 struct net_device_stats stats; 72 struct net_device_stats stats;
74 unsigned short lecid; /* Lecid of this client */ 73 unsigned short lecid; /* Lecid of this client */
75 struct lec_arp_table *lec_arp_empty_ones; 74 struct hlist_head lec_arp_empty_ones;
76 /* Used for storing VCC's that don't have a MAC address attached yet */ 75 /* Used for storing VCC's that don't have a MAC address attached yet */
77 struct lec_arp_table *lec_arp_tables[LEC_ARP_TABLE_SIZE]; 76 struct hlist_head lec_arp_tables[LEC_ARP_TABLE_SIZE];
78 /* Actual LE ARP table */ 77 /* Actual LE ARP table */
79 struct lec_arp_table *lec_no_forward; 78 struct hlist_head lec_no_forward;
80 /* Used for storing VCC's (and forward packets from) which are to 79 /*
81 age out by not using them to forward packets. 80 * Used for storing VCC's (and forward packets from) which are to
82 This is because to some LE clients there will be 2 VCCs. Only 81 * age out by not using them to forward packets.
83 one of them gets used. */ 82 * This is because to some LE clients there will be 2 VCCs. Only
84 struct lec_arp_table *mcast_fwds; 83 * one of them gets used.
85 /* With LANEv2 it is possible that BUS (or a special multicast server) 84 */
86 establishes multiple Multicast Forward VCCs to us. This list 85 struct hlist_head mcast_fwds;
87 collects all those VCCs. LANEv1 client has only one item in this 86 /*
88 list. These entries are not aged out. */ 87 * With LANEv2 it is possible that BUS (or a special multicast server)
89 spinlock_t lec_arp_lock; 88 * establishes multiple Multicast Forward VCCs to us. This list
90 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 89 * collects all those VCCs. LANEv1 client has only one item in this
91 struct atm_vcc *lecd; 90 * list. These entries are not aged out.
92 struct timer_list lec_arp_timer; 91 */
93 /* C10 */ 92 spinlock_t lec_arp_lock;
94 unsigned int maximum_unknown_frame_count; 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
95/* Within the period of time defined by this variable, the client will send 94 struct atm_vcc *lecd;
96 no more than C10 frames to BUS for a given unicast destination. (C11) */ 95 struct work_struct lec_arp_work; /* C10 */
97 unsigned long max_unknown_frame_time; 96 unsigned int maximum_unknown_frame_count;
98/* If no traffic has been sent in this vcc for this period of time, 97 /*
99 vcc will be torn down (C12)*/ 98 * Within the period of time defined by this variable, the client will send
100 unsigned long vcc_timeout_period; 99 * no more than C10 frames to BUS for a given unicast destination. (C11)
101/* An LE Client MUST not retry an LE_ARP_REQUEST for a 100 */
102 given frame's LAN Destination more than maximum retry count times, 101 unsigned long max_unknown_frame_time;
103 after the first LEC_ARP_REQUEST (C13)*/ 102 /*
104 unsigned short max_retry_count; 103 * If no traffic has been sent in this vcc for this period of time,
105/* Max time the client will maintain an entry in its arp cache in 104 * vcc will be torn down (C12)
106 absence of a verification of that relationship (C17)*/ 105 */
107 unsigned long aging_time; 106 unsigned long vcc_timeout_period;
108/* Max time the client will maintain an entry in cache when 107 /*
109 topology change flag is true (C18) */ 108 * An LE Client MUST not retry an LE_ARP_REQUEST for a
110 unsigned long forward_delay_time; 109 * given frame's LAN Destination more than maximum retry count times,
111/* Topology change flag (C19)*/ 110 * after the first LEC_ARP_REQUEST (C13)
112 int topology_change; 111 */
113/* Max time the client expects an LE_ARP_REQUEST/LE_ARP_RESPONSE 112 unsigned short max_retry_count;
114 cycle to take (C20)*/ 113 /*
115 unsigned long arp_response_time; 114 * Max time the client will maintain an entry in its arp cache in
116/* Time limit ot wait to receive an LE_FLUSH_RESPONSE after the 115 * absence of a verification of that relationship (C17)
117 LE_FLUSH_REQUEST has been sent before taking recover action. (C21)*/ 116 */
118 unsigned long flush_timeout; 117 unsigned long aging_time;
119/* The time since sending a frame to the bus after which the 118 /*
120 LE Client may assume that the frame has been either discarded or 119 * Max time the client will maintain an entry in cache when
121 delivered to the recipient (C22) */ 120 * topology change flag is true (C18)
122 unsigned long path_switching_delay; 121 */
122 unsigned long forward_delay_time; /* Topology change flag (C19) */
123 int topology_change;
124 /*
125 * Max time the client expects an LE_ARP_REQUEST/LE_ARP_RESPONSE
126 * cycle to take (C20)
127 */
128 unsigned long arp_response_time;
129 /*
130 * Time limit ot wait to receive an LE_FLUSH_RESPONSE after the
131 * LE_FLUSH_REQUEST has been sent before taking recover action. (C21)
132 */
133 unsigned long flush_timeout;
134 /* The time since sending a frame to the bus after which the
135 * LE Client may assume that the frame has been either discarded or
136 * delivered to the recipient (C22)
137 */
138 unsigned long path_switching_delay;
123 139
124 u8 *tlvs; /* LANE2: TLVs are new */ 140 u8 *tlvs; /* LANE2: TLVs are new */
125 u32 sizeoftlvs; /* The size of the tlv array in bytes */ 141 u32 sizeoftlvs; /* The size of the tlv array in bytes */
126 int lane_version; /* LANE2 */ 142 int lane_version; /* LANE2 */
127 int itfnum; /* e.g. 2 for lec2, 5 for lec5 */ 143 int itfnum; /* e.g. 2 for lec2, 5 for lec5 */
128 struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */ 144 struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */
129 int is_proxy; /* bridge between ATM and Ethernet */ 145 int is_proxy; /* bridge between ATM and Ethernet */
130 int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */ 146 int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */
131}; 147};
132 148
133struct lec_vcc_priv { 149struct lec_vcc_priv {
134 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 150 void (*old_pop) (struct atm_vcc *vcc, struct sk_buff *skb);
135 int xoff; 151 int xoff;
136}; 152};
137 153
138#define LEC_VCC_PRIV(vcc) ((struct lec_vcc_priv *)((vcc)->user_back)) 154#define LEC_VCC_PRIV(vcc) ((struct lec_vcc_priv *)((vcc)->user_back))
139 155
140#endif /* _LEC_H_ */ 156#endif /* _LEC_H_ */
141
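struct lec_priv now holds hlist_head lists (and a work_struct) where it used to hold raw lec_arp_table pointers and a timer_list. The matching initialization happens in lec_arp_init() in lec.c, which is not among the hunks shown above; presumably it reduces to a loop of INIT_HLIST_HEAD() calls along these lines, with the work-queue setup as sketched earlier:

	int i;

	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
	INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
	INIT_HLIST_HEAD(&priv->lec_no_forward);
	INIT_HLIST_HEAD(&priv->mcast_fwds);
	spin_lock_init(&priv->lec_arp_lock);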
diff --git a/net/atm/lec_arpc.h b/net/atm/lec_arpc.h
index 397448094648..ec67435a40a6 100644
--- a/net/atm/lec_arpc.h
+++ b/net/atm/lec_arpc.h
@@ -1,92 +1,96 @@
1/* 1/*
2 * Lec arp cache 2 * Lec arp cache
3 * Marko Kiiskila mkiiskila@yahoo.com
4 * 3 *
4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6#ifndef _LEC_ARP_H 6#ifndef _LEC_ARP_H_
7#define _LEC_ARP_H 7#define _LEC_ARP_H_
8#include <linux/atm.h> 8#include <linux/atm.h>
9#include <linux/atmdev.h> 9#include <linux/atmdev.h>
10#include <linux/if_ether.h> 10#include <linux/if_ether.h>
11#include <linux/atmlec.h> 11#include <linux/atmlec.h>
12 12
13struct lec_arp_table { 13struct lec_arp_table {
14 struct lec_arp_table *next; /* Linked entry list */ 14 struct hlist_node next; /* Linked entry list */
15 unsigned char atm_addr[ATM_ESA_LEN]; /* Atm address */ 15 unsigned char atm_addr[ATM_ESA_LEN]; /* Atm address */
16 unsigned char mac_addr[ETH_ALEN]; /* Mac address */ 16 unsigned char mac_addr[ETH_ALEN]; /* Mac address */
17 int is_rdesc; /* Mac address is a route descriptor */ 17 int is_rdesc; /* Mac address is a route descriptor */
18 struct atm_vcc *vcc; /* Vcc this entry is attached */ 18 struct atm_vcc *vcc; /* Vcc this entry is attached */
19 struct atm_vcc *recv_vcc; /* Vcc we receive data from */ 19 struct atm_vcc *recv_vcc; /* Vcc we receive data from */
20 void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
21 /* Push that leads to daemon */
22 void (*old_recv_push)(struct atm_vcc *vcc, struct sk_buff *skb);
23 /* Push that leads to daemon */
24 void (*old_close)(struct atm_vcc *vcc);
25 /* We want to see when this
26 * vcc gets closed */
27 unsigned long last_used; /* For expiry */
28 unsigned long timestamp; /* Used for various timestamping
29 * things:
30 * 1. FLUSH started
31 * (status=ESI_FLUSH_PENDING)
32 * 2. Counting to
33 * max_unknown_frame_time
34 * (status=ESI_ARP_PENDING||
35 * status=ESI_VC_PENDING)
36 */
37 unsigned char no_tries; /* No of times arp retry has been
38 tried */
39 unsigned char status; /* Status of this entry */
40 unsigned short flags; /* Flags for this entry */
41 unsigned short packets_flooded; /* Data packets flooded */
42 unsigned long flush_tran_id; /* Transaction id in flush protocol */
43 struct timer_list timer; /* Arping timer */
44 struct lec_priv *priv; /* Pointer back */
45 20
46 u8 *tlvs; /* LANE2: Each MAC address can have TLVs */ 21 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb);
47 u32 sizeoftlvs; /* associated with it. sizeoftlvs tells the */ 22 /* Push that leads to daemon */
48 /* the length of the tlvs array */ 23
49 struct sk_buff_head tx_wait; /* wait queue for outgoing packets */ 24 void (*old_recv_push) (struct atm_vcc *vcc, struct sk_buff *skb);
25 /* Push that leads to daemon */
26
27 unsigned long last_used; /* For expiry */
28 unsigned long timestamp; /* Used for various timestamping things:
29 * 1. FLUSH started
30 * (status=ESI_FLUSH_PENDING)
31 * 2. Counting to
32 * max_unknown_frame_time
33 * (status=ESI_ARP_PENDING||
34 * status=ESI_VC_PENDING)
35 */
36 unsigned char no_tries; /* No of times arp retry has been tried */
37 unsigned char status; /* Status of this entry */
38 unsigned short flags; /* Flags for this entry */
39 unsigned short packets_flooded; /* Data packets flooded */
40 unsigned long flush_tran_id; /* Transaction id in flush protocol */
41 struct timer_list timer; /* Arping timer */
42 struct lec_priv *priv; /* Pointer back */
43 u8 *tlvs;
44 u32 sizeoftlvs; /*
45 * LANE2: Each MAC address can have TLVs
46 * associated with it. sizeoftlvs tells the
47 * the length of the tlvs array
48 */
49 struct sk_buff_head tx_wait; /* wait queue for outgoing packets */
50 atomic_t usage; /* usage count */
50}; 51};
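The structural change that drives the whole lec.c conversion is here: next is now a struct hlist_node embedded in the entry instead of a struct lec_arp_table * chain pointer, so the iterators in lec.c recover the containing entry via container_of(). With the 2.6.17-era hlist API, which still threads an explicit struct hlist_node * cursor through the loop, the usage seen above expands roughly as:

	struct hlist_node *node;
	struct lec_arp_table *entry;

	hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
		/* here entry == hlist_entry(node, struct lec_arp_table, next),
		 * i.e. container_of() applied to the embedded 'next' member */
	}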
51 52
52struct tlv { /* LANE2: Template tlv struct for accessing */ 53/*
53 /* the tlvs in the lec_arp_table->tlvs array*/ 54 * LANE2: Template tlv struct for accessing
54 u32 type; 55 * the tlvs in the lec_arp_table->tlvs array
55 u8 length; 56 */
56 u8 value[255]; 57struct tlv {
58 u32 type;
59 u8 length;
60 u8 value[255];
57}; 61};
58 62
59/* Status fields */ 63/* Status fields */
60#define ESI_UNKNOWN 0 /* 64#define ESI_UNKNOWN 0 /*
61 * Next packet sent to this mac address 65 * Next packet sent to this mac address
62 * causes ARP-request to be sent 66 * causes ARP-request to be sent
63 */ 67 */
64#define ESI_ARP_PENDING 1 /* 68#define ESI_ARP_PENDING 1 /*
65 * There is no ATM address associated with this 69 * There is no ATM address associated with this
66 * 48-bit address. The LE-ARP protocol is in 70 * 48-bit address. The LE-ARP protocol is in
67 * progress. 71 * progress.
68 */ 72 */
69#define ESI_VC_PENDING 2 /* 73#define ESI_VC_PENDING 2 /*
70 * There is a valid ATM address associated with 74 * There is a valid ATM address associated with
71 * this 48-bit address but there is no VC set 75 * this 48-bit address but there is no VC set
72 * up to that ATM address. The signaling 76 * up to that ATM address. The signaling
73 * protocol is in process. 77 * protocol is in process.
74 */ 78 */
75#define ESI_FLUSH_PENDING 4 /* 79#define ESI_FLUSH_PENDING 4 /*
76 * The LEC has been notified of the FLUSH_START 80 * The LEC has been notified of the FLUSH_START
77 * status and it is assumed that the flush 81 * status and it is assumed that the flush
78 * protocol is in process. 82 * protocol is in process.
79 */ 83 */
80#define ESI_FORWARD_DIRECT 5 /* 84#define ESI_FORWARD_DIRECT 5 /*
81 * Either the Path Switching Delay (C22) has 85 * Either the Path Switching Delay (C22) has
82 * elapsed or the LEC has notified the Mapping 86 * elapsed or the LEC has notified the Mapping
83 * that the flush protocol has completed. In 87 * that the flush protocol has completed. In
84 * either case, it is safe to forward packets 88 * either case, it is safe to forward packets
85 * to this address via the data direct VC. 89 * to this address via the data direct VC.
86 */ 90 */
87 91
88/* Flag values */ 92/* Flag values */
89#define LEC_REMOTE_FLAG 0x0001 93#define LEC_REMOTE_FLAG 0x0001
90#define LEC_PERMANENT_FLAG 0x0002 94#define LEC_PERMANENT_FLAG 0x0002
91 95
92#endif 96#endif /* _LEC_ARP_H_ */
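The lec_arpc.h rewrite above replaces the hand-rolled *next pointer in struct lec_arp_table with an embedded hlist_node and adds an atomic_t usage count, so entries sit on a standard intrusive list and only disappear when the last user drops its reference. The userspace C sketch below shows that pattern with illustrative names (lane_entry, entry_get, entry_put); it only approximates the kernel's hlist and atomic_t helpers with a plain pointer list and an int counter.

/* Minimal sketch: an entry on an intrusive list plus a use count that
 * decides when the entry may actually be freed.  Names are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

struct list_node {
    struct list_node *next;
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct lane_entry {
    struct list_node node;      /* embedded link, like hlist_node next */
    unsigned char mac[6];
    int usage;                  /* stands in for atomic_t usage */
};

static struct list_node *table_head;

static struct lane_entry *entry_get(struct lane_entry *e)
{
    e->usage++;
    return e;
}

static void entry_put(struct lane_entry *e)
{
    if (--e->usage == 0)
        free(e);
}

static void table_add(struct lane_entry *e)
{
    e->node.next = table_head;
    table_head = &e->node;
}

static struct lane_entry *table_lookup(const unsigned char *mac)
{
    struct list_node *n;

    for (n = table_head; n; n = n->next) {
        struct lane_entry *e = container_of(n, struct lane_entry, node);
        if (memcmp(e->mac, mac, 6) == 0)
            return entry_get(e);   /* caller must entry_put() */
    }
    return NULL;
}

int main(void)
{
    unsigned char mac[6] = { 0, 1, 2, 3, 4, 5 };
    struct lane_entry *e = calloc(1, sizeof(*e));
    struct lane_entry *found;

    memcpy(e->mac, mac, 6);
    e->usage = 1;               /* reference held by the table itself */
    table_add(e);

    found = table_lookup(mac);
    printf("found=%p usage=%d\n", (void *)found, found ? found->usage : 0);
    if (found)
        entry_put(found);       /* drop the lookup reference */
    return 0;
}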
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index b87c2a88bdce..0d2b994af511 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -560,7 +560,6 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
560 struct atmmpc_ioc ioc_data; 560 struct atmmpc_ioc ioc_data;
561 in_cache_entry *in_entry; 561 in_cache_entry *in_entry;
562 uint32_t ipaddr; 562 uint32_t ipaddr;
563 unsigned char *ip;
564 563
565 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); 564 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
566 if (bytes_left != 0) { 565 if (bytes_left != 0) {
@@ -583,9 +582,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
583 if (in_entry != NULL) mpc->in_ops->put(in_entry); 582 if (in_entry != NULL) mpc->in_ops->put(in_entry);
584 return -EINVAL; 583 return -EINVAL;
585 } 584 }
586 ip = (unsigned char*)&in_entry->ctrl_info.in_dst_ip;
587 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %u.%u.%u.%u\n", 585 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %u.%u.%u.%u\n",
588 mpc->dev->name, ip[0], ip[1], ip[2], ip[3]); 586 mpc->dev->name, NIPQUAD(in_entry->ctrl_info.in_dst_ip));
589 in_entry->shortcut = vcc; 587 in_entry->shortcut = vcc;
590 mpc->in_ops->put(in_entry); 588 mpc->in_ops->put(in_entry);
591 } else { 589 } else {
@@ -616,10 +614,8 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
616 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name); 614 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name);
617 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); 615 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
618 if (in_entry) { 616 if (in_entry) {
619 unsigned char *ip __attribute__ ((unused)) =
620 (unsigned char *)&in_entry->ctrl_info.in_dst_ip;
621 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %u.%u.%u.%u\n", 617 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %u.%u.%u.%u\n",
622 mpc->dev->name, ip[0], ip[1], ip[2], ip[3]); 618 mpc->dev->name, NIPQUAD(in_entry->ctrl_info.in_dst_ip));
623 in_entry->shortcut = NULL; 619 in_entry->shortcut = NULL;
624 mpc->in_ops->put(in_entry); 620 mpc->in_ops->put(in_entry);
625 } 621 }
@@ -1154,18 +1150,17 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1154{ 1150{
1155 uint32_t dst_ip = msg->content.in_info.in_dst_ip; 1151 uint32_t dst_ip = msg->content.in_info.in_dst_ip;
1156 uint32_t mask = msg->ip_mask; 1152 uint32_t mask = msg->ip_mask;
1157 unsigned char *ip = (unsigned char *)&dst_ip;
1158 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); 1153 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
1159 1154
1160 if(entry == NULL){ 1155 if(entry == NULL){
1161 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ", mpc->dev->name); 1156 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ", mpc->dev->name);
1162 printk("ip = %u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]); 1157 printk("ip = %u.%u.%u.%u\n", NIPQUAD(dst_ip));
1163 return; 1158 return;
1164 } 1159 }
1165 1160
1166 do { 1161 do {
1167 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %u.%u.%u.%u\n" , 1162 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %u.%u.%u.%u\n" ,
1168 mpc->dev->name, ip[0], ip[1], ip[2], ip[3]); 1163 mpc->dev->name, NIPQUAD(dst_ip));
1169 write_lock_bh(&mpc->ingress_lock); 1164 write_lock_bh(&mpc->ingress_lock);
1170 mpc->in_ops->remove_entry(entry, mpc); 1165 mpc->in_ops->remove_entry(entry, mpc);
1171 write_unlock_bh(&mpc->ingress_lock); 1166 write_unlock_bh(&mpc->ingress_lock);
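The mpc.c hunks above drop the local `unsigned char *ip` casts and print addresses through NIPQUAD() instead, which expands to the four bytes of the 32-bit address right in the printk argument list. The sketch below shows both styles side by side in userspace; the macro body mirrors the kernel one of this era but should be read as an approximation.

/* Old style casts once and indexes four times; NIPQUAD() does the
 * byte extraction inline, so no temporary pointer is needed. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define NIPQUAD(addr) \
    ((unsigned char *)&(addr))[0], \
    ((unsigned char *)&(addr))[1], \
    ((unsigned char *)&(addr))[2], \
    ((unsigned char *)&(addr))[3]

int main(void)
{
    uint32_t dst_ip = htonl(0xc0a80001);        /* 192.168.0.1, network order */

    /* old style: local cast, then index the bytes */
    unsigned char *ip = (unsigned char *)&dst_ip;
    printf("old: ip = %u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]);

    /* new style: no temporary pointer in the calling function */
    printf("new: ip = %u.%u.%u.%u\n", NIPQUAD(dst_ip));
    return 0;
}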
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 781ed1b9329d..fbf13cdcf46e 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -87,7 +87,6 @@ static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
87static in_cache_entry *in_cache_add_entry(uint32_t dst_ip, 87static in_cache_entry *in_cache_add_entry(uint32_t dst_ip,
88 struct mpoa_client *client) 88 struct mpoa_client *client)
89{ 89{
90 unsigned char *ip __attribute__ ((unused)) = (unsigned char *)&dst_ip;
91 in_cache_entry* entry = kmalloc(sizeof(in_cache_entry), GFP_KERNEL); 90 in_cache_entry* entry = kmalloc(sizeof(in_cache_entry), GFP_KERNEL);
92 91
93 if (entry == NULL) { 92 if (entry == NULL) {
@@ -95,7 +94,7 @@ static in_cache_entry *in_cache_add_entry(uint32_t dst_ip,
95 return NULL; 94 return NULL;
96 } 95 }
97 96
98 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]); 97 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %u.%u.%u.%u\n", NIPQUAD(dst_ip));
99 memset(entry,0,sizeof(in_cache_entry)); 98 memset(entry,0,sizeof(in_cache_entry));
100 99
101 atomic_set(&entry->use, 1); 100 atomic_set(&entry->use, 1);
@@ -152,10 +151,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
152 151
153 if( entry->count > mpc->parameters.mpc_p1 && 152 if( entry->count > mpc->parameters.mpc_p1 &&
154 entry->entry_state == INGRESS_INVALID){ 153 entry->entry_state == INGRESS_INVALID){
155 unsigned char *ip __attribute__ ((unused)) = 154 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %u.%u.%u.%u, sending MPOA res req\n", mpc->dev->name, NIPQUAD(entry->ctrl_info.in_dst_ip));
156 (unsigned char *)&entry->ctrl_info.in_dst_ip;
157
158 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %u.%u.%u.%u, sending MPOA res req\n", mpc->dev->name, ip[0], ip[1], ip[2], ip[3]);
159 entry->entry_state = INGRESS_RESOLVING; 155 entry->entry_state = INGRESS_RESOLVING;
160 msg.type = SND_MPOA_RES_RQST; 156 msg.type = SND_MPOA_RES_RQST;
161 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN ); 157 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN );
@@ -187,11 +183,9 @@ static void in_cache_remove_entry(in_cache_entry *entry,
187{ 183{
188 struct atm_vcc *vcc; 184 struct atm_vcc *vcc;
189 struct k_message msg; 185 struct k_message msg;
190 unsigned char *ip;
191 186
192 vcc = entry->shortcut; 187 vcc = entry->shortcut;
193 ip = (unsigned char *)&entry->ctrl_info.in_dst_ip; 188 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %u.%u.%u.%u\n",NIPQUAD(entry->ctrl_info.in_dst_ip));
194 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %u.%u.%u.%u\n",ip[0], ip[1], ip[2], ip[3]);
195 189
196 if (entry->prev != NULL) 190 if (entry->prev != NULL)
197 entry->prev->next = entry->next; 191 entry->prev->next = entry->next;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 788ea7a2b744..67df99e2e5c8 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -48,41 +48,56 @@
48#define BT_DBG(D...) 48#define BT_DBG(D...)
49#endif 49#endif
50 50
51#define VERSION "2.10" 51#define VERSION "2.11"
52 52
53/* Bluetooth sockets */ 53/* Bluetooth sockets */
54#define BT_MAX_PROTO 8 54#define BT_MAX_PROTO 8
55static struct net_proto_family *bt_proto[BT_MAX_PROTO]; 55static struct net_proto_family *bt_proto[BT_MAX_PROTO];
56static DEFINE_RWLOCK(bt_proto_lock);
56 57
57int bt_sock_register(int proto, struct net_proto_family *ops) 58int bt_sock_register(int proto, struct net_proto_family *ops)
58{ 59{
60 int err = 0;
61
59 if (proto < 0 || proto >= BT_MAX_PROTO) 62 if (proto < 0 || proto >= BT_MAX_PROTO)
60 return -EINVAL; 63 return -EINVAL;
61 64
65 write_lock(&bt_proto_lock);
66
62 if (bt_proto[proto]) 67 if (bt_proto[proto])
63 return -EEXIST; 68 err = -EEXIST;
69 else
70 bt_proto[proto] = ops;
64 71
65 bt_proto[proto] = ops; 72 write_unlock(&bt_proto_lock);
66 return 0; 73
74 return err;
67} 75}
68EXPORT_SYMBOL(bt_sock_register); 76EXPORT_SYMBOL(bt_sock_register);
69 77
70int bt_sock_unregister(int proto) 78int bt_sock_unregister(int proto)
71{ 79{
80 int err = 0;
81
72 if (proto < 0 || proto >= BT_MAX_PROTO) 82 if (proto < 0 || proto >= BT_MAX_PROTO)
73 return -EINVAL; 83 return -EINVAL;
74 84
85 write_lock(&bt_proto_lock);
86
75 if (!bt_proto[proto]) 87 if (!bt_proto[proto])
76 return -ENOENT; 88 err = -ENOENT;
89 else
90 bt_proto[proto] = NULL;
77 91
78 bt_proto[proto] = NULL; 92 write_unlock(&bt_proto_lock);
79 return 0; 93
94 return err;
80} 95}
81EXPORT_SYMBOL(bt_sock_unregister); 96EXPORT_SYMBOL(bt_sock_unregister);
82 97
83static int bt_sock_create(struct socket *sock, int proto) 98static int bt_sock_create(struct socket *sock, int proto)
84{ 99{
85 int err = 0; 100 int err;
86 101
87 if (proto < 0 || proto >= BT_MAX_PROTO) 102 if (proto < 0 || proto >= BT_MAX_PROTO)
88 return -EINVAL; 103 return -EINVAL;
@@ -92,11 +107,18 @@ static int bt_sock_create(struct socket *sock, int proto)
92 request_module("bt-proto-%d", proto); 107 request_module("bt-proto-%d", proto);
93 } 108 }
94#endif 109#endif
110
95 err = -EPROTONOSUPPORT; 111 err = -EPROTONOSUPPORT;
112
113 read_lock(&bt_proto_lock);
114
96 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { 115 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
97 err = bt_proto[proto]->create(sock, proto); 116 err = bt_proto[proto]->create(sock, proto);
98 module_put(bt_proto[proto]->owner); 117 module_put(bt_proto[proto]->owner);
99 } 118 }
119
120 read_unlock(&bt_proto_lock);
121
100 return err; 122 return err;
101} 123}
102 124
@@ -276,7 +298,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
276 set_current_state(TASK_INTERRUPTIBLE); 298 set_current_state(TASK_INTERRUPTIBLE);
277 299
278 if (!timeo) { 300 if (!timeo) {
279 err = -EAGAIN; 301 err = -EINPROGRESS;
280 break; 302 break;
281 } 303 }
282 304
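The af_bluetooth.c changes above put the bt_proto[] registration table under a reader/writer lock: bt_sock_register() and bt_sock_unregister() take it for writing and funnel every outcome through a single unlock, while the socket-creation lookup only takes it for reading. A minimal userspace sketch of that shape, using pthreads and illustrative names, follows.

/* Writers (register/unregister) take the rwlock exclusively; the hot
 * lookup path takes it shared.  Error codes mimic the kernel style. */
#include <stdio.h>
#include <errno.h>
#include <pthread.h>

#define MAX_PROTO 8

struct proto_ops { const char *name; };

static struct proto_ops *proto_tab[MAX_PROTO];
static pthread_rwlock_t proto_lock = PTHREAD_RWLOCK_INITIALIZER;

static int proto_register(int proto, struct proto_ops *ops)
{
    int err = 0;

    if (proto < 0 || proto >= MAX_PROTO)
        return -EINVAL;

    pthread_rwlock_wrlock(&proto_lock);
    if (proto_tab[proto])
        err = -EEXIST;          /* record the conflict, but always unlock */
    else
        proto_tab[proto] = ops;
    pthread_rwlock_unlock(&proto_lock);

    return err;
}

static const char *proto_lookup(int proto)
{
    const char *name = NULL;

    pthread_rwlock_rdlock(&proto_lock);
    if (proto >= 0 && proto < MAX_PROTO && proto_tab[proto])
        name = proto_tab[proto]->name;
    pthread_rwlock_unlock(&proto_lock);

    return name;
}

int main(void)
{
    static struct proto_ops l2cap = { "l2cap" };

    printf("register: %d\n", proto_register(0, &l2cap));
    printf("again:    %d\n", proto_register(0, &l2cap));   /* -EEXIST */
    printf("lookup:   %s\n", proto_lookup(0));
    return 0;
}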
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index e620061fb50f..4d3424c2421c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -51,6 +51,7 @@
51#include <asm/unaligned.h> 51#include <asm/unaligned.h>
52 52
53#include <net/bluetooth/bluetooth.h> 53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
54#include <net/bluetooth/l2cap.h> 55#include <net/bluetooth/l2cap.h>
55 56
56#include "bnep.h" 57#include "bnep.h"
@@ -515,6 +516,24 @@ static int bnep_session(void *arg)
515 return 0; 516 return 0;
516} 517}
517 518
519static struct device *bnep_get_device(struct bnep_session *session)
520{
521 bdaddr_t *src = &bt_sk(session->sock->sk)->src;
522 bdaddr_t *dst = &bt_sk(session->sock->sk)->dst;
523 struct hci_dev *hdev;
524 struct hci_conn *conn;
525
526 hdev = hci_get_route(dst, src);
527 if (!hdev)
528 return NULL;
529
530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
531
532 hci_dev_put(hdev);
533
534 return conn ? &conn->dev : NULL;
535}
536
518int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) 537int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
519{ 538{
520 struct net_device *dev; 539 struct net_device *dev;
@@ -534,7 +553,6 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
534 if (!dev) 553 if (!dev)
535 return -ENOMEM; 554 return -ENOMEM;
536 555
537
538 down_write(&bnep_session_sem); 556 down_write(&bnep_session_sem);
539 557
540 ss = __bnep_get_session(dst); 558 ss = __bnep_get_session(dst);
@@ -551,7 +569,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
551 memcpy(s->eh.h_source, &dst, ETH_ALEN); 569 memcpy(s->eh.h_source, &dst, ETH_ALEN);
552 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); 570 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
553 571
554 s->dev = dev; 572 s->dev = dev;
555 s->sock = sock; 573 s->sock = sock;
556 s->role = req->role; 574 s->role = req->role;
557 s->state = BT_CONNECTED; 575 s->state = BT_CONNECTED;
@@ -568,6 +586,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
568 bnep_set_default_proto_filter(s); 586 bnep_set_default_proto_filter(s);
569#endif 587#endif
570 588
589 SET_NETDEV_DEV(dev, bnep_get_device(s));
590
571 err = register_netdev(dev); 591 err = register_netdev(dev);
572 if (err) { 592 if (err) {
573 goto failed; 593 goto failed;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 28c55835422a..5563db1bf526 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -43,6 +43,7 @@
43#include <linux/ioctl.h> 43#include <linux/ioctl.h>
44#include <linux/file.h> 44#include <linux/file.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/compat.h>
46#include <net/sock.h> 47#include <net/sock.h>
47 48
48#include <asm/system.h> 49#include <asm/system.h>
@@ -146,24 +147,56 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
146 return 0; 147 return 0;
147} 148}
148 149
150#ifdef CONFIG_COMPAT
151static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
152{
153 if (cmd == BNEPGETCONNLIST) {
154 struct bnep_connlist_req cl;
155 uint32_t uci;
156 int err;
157
158 if (get_user(cl.cnum, (uint32_t __user *) arg) ||
159 get_user(uci, (u32 __user *) (arg + 4)))
160 return -EFAULT;
161
162 cl.ci = compat_ptr(uci);
163
164 if (cl.cnum <= 0)
165 return -EINVAL;
166
167 err = bnep_get_connlist(&cl);
168
169 if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
170 err = -EFAULT;
171
172 return err;
173 }
174
175 return bnep_sock_ioctl(sock, cmd, arg);
176}
177#endif
178
149static const struct proto_ops bnep_sock_ops = { 179static const struct proto_ops bnep_sock_ops = {
150 .family = PF_BLUETOOTH, 180 .family = PF_BLUETOOTH,
151 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
152 .release = bnep_sock_release, 182 .release = bnep_sock_release,
153 .ioctl = bnep_sock_ioctl, 183 .ioctl = bnep_sock_ioctl,
154 .bind = sock_no_bind, 184#ifdef CONFIG_COMPAT
155 .getname = sock_no_getname, 185 .compat_ioctl = bnep_sock_compat_ioctl,
156 .sendmsg = sock_no_sendmsg, 186#endif
157 .recvmsg = sock_no_recvmsg, 187 .bind = sock_no_bind,
158 .poll = sock_no_poll, 188 .getname = sock_no_getname,
159 .listen = sock_no_listen, 189 .sendmsg = sock_no_sendmsg,
160 .shutdown = sock_no_shutdown, 190 .recvmsg = sock_no_recvmsg,
161 .setsockopt = sock_no_setsockopt, 191 .poll = sock_no_poll,
162 .getsockopt = sock_no_getsockopt, 192 .listen = sock_no_listen,
163 .connect = sock_no_connect, 193 .shutdown = sock_no_shutdown,
164 .socketpair = sock_no_socketpair, 194 .setsockopt = sock_no_setsockopt,
165 .accept = sock_no_accept, 195 .getsockopt = sock_no_getsockopt,
166 .mmap = sock_no_mmap 196 .connect = sock_no_connect,
197 .socketpair = sock_no_socketpair,
198 .accept = sock_no_accept,
199 .mmap = sock_no_mmap
167}; 200};
168 201
169static struct proto bnep_proto = { 202static struct proto bnep_proto = {
@@ -181,7 +214,7 @@ static int bnep_sock_create(struct socket *sock, int protocol)
181 if (sock->type != SOCK_RAW) 214 if (sock->type != SOCK_RAW)
182 return -ESOCKTNOSUPPORT; 215 return -ESOCKTNOSUPPORT;
183 216
184 sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &bnep_proto, 1); 217 sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1);
185 if (!sk) 218 if (!sk)
186 return -ENOMEM; 219 return -ENOMEM;
187 220
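The new bnep_sock_compat_ioctl() above exists because a 32-bit process lays out struct bnep_connlist_req with a 4-byte `ci` pointer, so the structure cannot be copied as-is on a 64-bit kernel; the handler reads the count and the 32-bit pointer value separately and widens the pointer with compat_ptr(). The userspace sketch below only illustrates the layout mismatch and the widening step; the struct names are invented for the example.

/* The 32-bit and 64-bit layouts differ in size because of the pointer
 * field, so the handler rebuilds the native struct field by field. */
#include <stdio.h>
#include <stdint.h>

struct connlist_req64 {            /* native layout on a 64-bit kernel */
    uint32_t cnum;
    void *ci;
};

struct connlist_req32 {            /* what a 32-bit process actually passes */
    uint32_t cnum;
    uint32_t ci;                   /* compat_uptr_t: a 32-bit user pointer */
};

static void handle_compat(const void *arg)
{
    const struct connlist_req32 *u32req = arg;
    struct connlist_req64 req;

    req.cnum = u32req->cnum;
    req.ci = (void *)(uintptr_t)u32req->ci;    /* widen, like compat_ptr() */
    printf("cnum=%u ci=%p\n", req.cnum, req.ci);
}

int main(void)
{
    struct connlist_req32 req = { .cnum = 4, .ci = 0x1000 };

    printf("sizeof 64-bit req = %zu, sizeof 32-bit req = %zu\n",
           sizeof(struct connlist_req64), sizeof(struct connlist_req32));
    handle_compat(&req);
    return 0;
}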
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 10ad7fd91d83..53295d33dc5c 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -34,6 +34,7 @@
34#include <linux/socket.h> 34#include <linux/socket.h>
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/compat.h>
37#include <net/sock.h> 38#include <net/sock.h>
38 39
39#include <linux/isdn/capilli.h> 40#include <linux/isdn/capilli.h>
@@ -137,11 +138,43 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
137 return -EINVAL; 138 return -EINVAL;
138} 139}
139 140
141#ifdef CONFIG_COMPAT
142static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
143{
144 if (cmd == CMTPGETCONNLIST) {
145 struct cmtp_connlist_req cl;
146 uint32_t uci;
147 int err;
148
149 if (get_user(cl.cnum, (uint32_t __user *) arg) ||
150 get_user(uci, (u32 __user *) (arg + 4)))
151 return -EFAULT;
152
153 cl.ci = compat_ptr(uci);
154
155 if (cl.cnum <= 0)
156 return -EINVAL;
157
158 err = cmtp_get_connlist(&cl);
159
160 if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
161 err = -EFAULT;
162
163 return err;
164 }
165
166 return cmtp_sock_ioctl(sock, cmd, arg);
167}
168#endif
169
140static const struct proto_ops cmtp_sock_ops = { 170static const struct proto_ops cmtp_sock_ops = {
141 .family = PF_BLUETOOTH, 171 .family = PF_BLUETOOTH,
142 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
143 .release = cmtp_sock_release, 173 .release = cmtp_sock_release,
144 .ioctl = cmtp_sock_ioctl, 174 .ioctl = cmtp_sock_ioctl,
175#ifdef CONFIG_COMPAT
176 .compat_ioctl = cmtp_sock_compat_ioctl,
177#endif
145 .bind = sock_no_bind, 178 .bind = sock_no_bind,
146 .getname = sock_no_getname, 179 .getname = sock_no_getname,
147 .sendmsg = sock_no_sendmsg, 180 .sendmsg = sock_no_sendmsg,
@@ -172,7 +205,7 @@ static int cmtp_sock_create(struct socket *sock, int protocol)
172 if (sock->type != SOCK_RAW) 205 if (sock->type != SOCK_RAW)
173 return -ESOCKTNOSUPPORT; 206 return -ESOCKTNOSUPPORT;
174 207
175 sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &cmtp_proto, 1); 208 sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1);
176 if (!sk) 209 if (!sk)
177 return -ENOMEM; 210 return -ENOMEM;
178 211
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 420ed4d7e57e..6cd5711fa28a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -51,7 +51,7 @@
51#define BT_DBG(D...) 51#define BT_DBG(D...)
52#endif 52#endif
53 53
54static void hci_acl_connect(struct hci_conn *conn) 54void hci_acl_connect(struct hci_conn *conn)
55{ 55{
56 struct hci_dev *hdev = conn->hdev; 56 struct hci_dev *hdev = conn->hdev;
57 struct inquiry_entry *ie; 57 struct inquiry_entry *ie;
@@ -63,6 +63,8 @@ static void hci_acl_connect(struct hci_conn *conn)
63 conn->out = 1; 63 conn->out = 1;
64 conn->link_mode = HCI_LM_MASTER; 64 conn->link_mode = HCI_LM_MASTER;
65 65
66 conn->attempt++;
67
66 memset(&cp, 0, sizeof(cp)); 68 memset(&cp, 0, sizeof(cp));
67 bacpy(&cp.bdaddr, &conn->dst); 69 bacpy(&cp.bdaddr, &conn->dst);
68 cp.pscan_rep_mode = 0x02; 70 cp.pscan_rep_mode = 0x02;
@@ -80,10 +82,24 @@ static void hci_acl_connect(struct hci_conn *conn)
80 cp.role_switch = 0x01; 82 cp.role_switch = 0x01;
81 else 83 else
82 cp.role_switch = 0x00; 84 cp.role_switch = 0x00;
83 85
84 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp); 86 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp);
85} 87}
86 88
89static void hci_acl_connect_cancel(struct hci_conn *conn)
90{
91 struct hci_cp_create_conn_cancel cp;
92
93 BT_DBG("%p", conn);
94
95 if (conn->hdev->hci_ver < 2)
96 return;
97
98 bacpy(&cp.bdaddr, &conn->dst);
99 hci_send_cmd(conn->hdev, OGF_LINK_CTL,
100 OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp);
101}
102
87void hci_acl_disconn(struct hci_conn *conn, __u8 reason) 103void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
88{ 104{
89 struct hci_cp_disconnect cp; 105 struct hci_cp_disconnect cp;
@@ -94,7 +110,8 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
94 110
95 cp.handle = __cpu_to_le16(conn->handle); 111 cp.handle = __cpu_to_le16(conn->handle);
96 cp.reason = reason; 112 cp.reason = reason;
97 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_DISCONNECT, sizeof(cp), &cp); 113 hci_send_cmd(conn->hdev, OGF_LINK_CTL,
114 OCF_DISCONNECT, sizeof(cp), &cp);
98} 115}
99 116
100void hci_add_sco(struct hci_conn *conn, __u16 handle) 117void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -124,12 +141,20 @@ static void hci_conn_timeout(unsigned long arg)
124 return; 141 return;
125 142
126 hci_dev_lock(hdev); 143 hci_dev_lock(hdev);
127 if (conn->state == BT_CONNECTED) 144
145 switch (conn->state) {
146 case BT_CONNECT:
147 hci_acl_connect_cancel(conn);
148 break;
149 case BT_CONNECTED:
128 hci_acl_disconn(conn, 0x13); 150 hci_acl_disconn(conn, 0x13);
129 else 151 break;
152 default:
130 conn->state = BT_CLOSED; 153 conn->state = BT_CLOSED;
154 break;
155 }
156
131 hci_dev_unlock(hdev); 157 hci_dev_unlock(hdev);
132 return;
133} 158}
134 159
135static void hci_conn_idle(unsigned long arg) 160static void hci_conn_idle(unsigned long arg)
@@ -179,6 +204,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
179 if (hdev->notify) 204 if (hdev->notify)
180 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); 205 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
181 206
207 hci_conn_add_sysfs(conn);
208
182 tasklet_enable(&hdev->tx_task); 209 tasklet_enable(&hdev->tx_task);
183 210
184 return conn; 211 return conn;
@@ -211,6 +238,8 @@ int hci_conn_del(struct hci_conn *conn)
211 238
212 tasklet_disable(&hdev->tx_task); 239 tasklet_disable(&hdev->tx_task);
213 240
241 hci_conn_del_sysfs(conn);
242
214 hci_conn_hash_del(hdev, conn); 243 hci_conn_hash_del(hdev, conn);
215 if (hdev->notify) 244 if (hdev->notify)
216 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); 245 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -221,7 +250,9 @@ int hci_conn_del(struct hci_conn *conn)
221 250
222 hci_dev_put(hdev); 251 hci_dev_put(hdev);
223 252
224 kfree(conn); 253 /* will free via device release */
254 put_device(&conn->dev);
255
225 return 0; 256 return 0;
226} 257}
227 258
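The reworked hci_conn_timeout() above turns the expiry handler into a small state machine: a connection still being set up gets a Create Connection Cancel, an established but idle link gets a disconnect, and anything else is simply marked closed. A compact sketch of that shape, with illustrative enum and helper names, follows.

/* The action on timer expiry depends on how far the connection got. */
#include <stdio.h>

enum conn_state { ST_CLOSED, ST_CONNECT, ST_CONNECT2, ST_CONNECTED };

struct conn {
    enum conn_state state;
    int refs;                           /* holders that keep the link alive */
};

static void send_create_conn_cancel(struct conn *c)
{
    (void)c;
    printf("cancel the pending connect\n");
}

static void send_disconnect(struct conn *c, int reason)
{
    (void)c;
    printf("disconnect, reason 0x%02x\n", reason);
}

static void conn_timeout(struct conn *c)
{
    if (c->refs)                        /* still referenced: nothing to do */
        return;

    switch (c->state) {
    case ST_CONNECT:                    /* still paging: cancel the attempt */
        send_create_conn_cancel(c);
        break;
    case ST_CONNECTED:                  /* idle established link: tear down */
        send_disconnect(c, 0x13);       /* remote user terminated connection */
        break;
    default:                            /* nothing on the air: just close */
        c->state = ST_CLOSED;
        break;
    }
}

int main(void)
{
    struct conn paging = { ST_CONNECT, 0 };
    struct conn idle   = { ST_CONNECTED, 0 };

    conn_timeout(&paging);
    conn_timeout(&idle);
    return 0;
}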
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5ed474277903..338ae977a31b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -206,6 +206,9 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
206 /* Read Local Supported Features */ 206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL); 207 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
208 208
209 /* Read Local Version */
210 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);
211
209 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
210 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL); 213 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
211 214
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 3896dabab11d..65f094845719 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -62,6 +62,7 @@ static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
62 62
63 switch (ocf) { 63 switch (ocf) {
64 case OCF_INQUIRY_CANCEL: 64 case OCF_INQUIRY_CANCEL:
65 case OCF_EXIT_PERIODIC_INQ:
65 status = *((__u8 *) skb->data); 66 status = *((__u8 *) skb->data);
66 67
67 if (status) { 68 if (status) {
@@ -297,6 +298,7 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
297/* Command Complete OGF INFO_PARAM */ 298/* Command Complete OGF INFO_PARAM */
298static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) 299static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
299{ 300{
301 struct hci_rp_read_loc_version *lv;
300 struct hci_rp_read_local_features *lf; 302 struct hci_rp_read_local_features *lf;
301 struct hci_rp_read_buffer_size *bs; 303 struct hci_rp_read_buffer_size *bs;
302 struct hci_rp_read_bd_addr *ba; 304 struct hci_rp_read_bd_addr *ba;
@@ -304,6 +306,23 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
304 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 306 BT_DBG("%s ocf 0x%x", hdev->name, ocf);
305 307
306 switch (ocf) { 308 switch (ocf) {
309 case OCF_READ_LOCAL_VERSION:
310 lv = (struct hci_rp_read_loc_version *) skb->data;
311
312 if (lv->status) {
313 BT_DBG("%s READ_LOCAL_VERSION failed %d", hdev->name, lf->status);
314 break;
315 }
316
317 hdev->hci_ver = lv->hci_ver;
318 hdev->hci_rev = btohs(lv->hci_rev);
319 hdev->manufacturer = btohs(lv->manufacturer);
320
321 BT_DBG("%s: manufacturer %d hci_ver %d hci_rev %d", hdev->name,
322 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
323
324 break;
325
307 case OCF_READ_LOCAL_FEATURES: 326 case OCF_READ_LOCAL_FEATURES:
308 lf = (struct hci_rp_read_local_features *) skb->data; 327 lf = (struct hci_rp_read_local_features *) skb->data;
309 328
@@ -328,7 +347,8 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
328 if (hdev->features[1] & LMP_HV3) 347 if (hdev->features[1] & LMP_HV3)
329 hdev->pkt_type |= (HCI_HV3); 348 hdev->pkt_type |= (HCI_HV3);
330 349
331 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, lf->features[0], lf->features[1], lf->features[2]); 350 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name,
351 lf->features[0], lf->features[1], lf->features[2]);
332 352
333 break; 353 break;
334 354
@@ -394,9 +414,12 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
394 414
395 if (status) { 415 if (status) {
396 if (conn && conn->state == BT_CONNECT) { 416 if (conn && conn->state == BT_CONNECT) {
397 conn->state = BT_CLOSED; 417 if (status != 0x0c || conn->attempt > 2) {
398 hci_proto_connect_cfm(conn, status); 418 conn->state = BT_CLOSED;
399 hci_conn_del(conn); 419 hci_proto_connect_cfm(conn, status);
420 hci_conn_del(conn);
421 } else
422 conn->state = BT_CONNECT2;
400 } 423 }
401 } else { 424 } else {
402 if (!conn) { 425 if (!conn) {
@@ -708,7 +731,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
708static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 731static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
709{ 732{
710 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; 733 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data;
711 struct hci_conn *conn; 734 struct hci_conn *conn, *pend;
712 735
713 BT_DBG("%s", hdev->name); 736 BT_DBG("%s", hdev->name);
714 737
@@ -757,6 +780,10 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
757 780
758 hci_send_cmd(hdev, OGF_LINK_CTL, 781 hci_send_cmd(hdev, OGF_LINK_CTL,
759 OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); 782 OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
783 } else {
784 /* Update disconnect timer */
785 hci_conn_hold(conn);
786 hci_conn_put(conn);
760 } 787 }
761 } else 788 } else
762 conn->state = BT_CLOSED; 789 conn->state = BT_CLOSED;
@@ -777,6 +804,10 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
777 if (ev->status) 804 if (ev->status)
778 hci_conn_del(conn); 805 hci_conn_del(conn);
779 806
807 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
808 if (pend)
809 hci_acl_connect(pend);
810
780 hci_dev_unlock(hdev); 811 hci_dev_unlock(hdev);
781} 812}
782 813
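As I read the hci_event.c hunks above, a Create Connection that fails with status 0x0c (command disallowed, typically because another connection attempt is already in flight) is no longer torn down immediately: as long as conn->attempt has not exceeded two tries it is parked in BT_CONNECT2, and the next connection-complete event restarts one parked attempt. The sketch below shows only that retry-or-park shape; the status macro, the fields and the single pending slot are illustrative, whereas the kernel walks its connection hash instead.

/* Fail with the "busy" status: park and retry later instead of dying. */
#include <stdio.h>

#define STATUS_BUSY 0x0c                /* HCI "command disallowed" */

struct attempt {
    int attempt;                        /* how many times we have tried */
    int parked;                         /* 1 = waiting for the link to free up */
};

static void start_connect(struct attempt *a)
{
    a->attempt++;
    a->parked = 0;
    printf("create connection, attempt %d\n", a->attempt);
}

/* command-status handler: decide between giving up and parking */
static void on_status(struct attempt *a, int status)
{
    if (!status)
        return;                         /* connection setup is proceeding */
    if (status != STATUS_BUSY || a->attempt > 2)
        printf("giving up, status 0x%02x\n", status);
    else
        a->parked = 1;                  /* BT_CONNECT2 in the kernel code */
}

/* connection-complete handler: kick one parked attempt */
static void on_complete(struct attempt *pending)
{
    if (pending && pending->parked)
        start_connect(pending);
}

int main(void)
{
    struct attempt a = { 0, 0 };

    start_connect(&a);
    on_status(&a, STATUS_BUSY);         /* controller busy: park it */
    on_complete(&a);                    /* another link finished: retry */
    on_status(&a, 0x00);                /* this time it goes through */
    return 0;
}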
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1a35d343e08a..f26a9eb49945 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -618,7 +618,7 @@ static int hci_sock_create(struct socket *sock, int protocol)
618 618
619 sock->ops = &hci_sock_ops; 619 sock->ops = &hci_sock_ops;
620 620
621 sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &hci_sk_proto, 1); 621 sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1);
622 if (!sk) 622 if (!sk)
623 return -ENOMEM; 623 return -ENOMEM;
624 624
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3987d167f04e..954eb74eb370 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -13,16 +13,32 @@
13#define BT_DBG(D...) 13#define BT_DBG(D...)
14#endif 14#endif
15 15
16static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 16static inline char *typetostr(int type)
17{ 17{
18 struct hci_dev *hdev = dev_get_drvdata(dev); 18 switch (type) {
19 return sprintf(buf, "%s\n", hdev->name); 19 case HCI_VIRTUAL:
20 return "VIRTUAL";
21 case HCI_USB:
22 return "USB";
23 case HCI_PCCARD:
24 return "PCCARD";
25 case HCI_UART:
26 return "UART";
27 case HCI_RS232:
28 return "RS232";
29 case HCI_PCI:
30 return "PCI";
31 case HCI_SDIO:
32 return "SDIO";
33 default:
34 return "UNKNOWN";
35 }
20} 36}
21 37
22static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 38static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
23{ 39{
24 struct hci_dev *hdev = dev_get_drvdata(dev); 40 struct hci_dev *hdev = dev_get_drvdata(dev);
25 return sprintf(buf, "%d\n", hdev->type); 41 return sprintf(buf, "%s\n", typetostr(hdev->type));
26} 42}
27 43
28static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 44static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
@@ -33,10 +49,22 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr, c
33 return sprintf(buf, "%s\n", batostr(&bdaddr)); 49 return sprintf(buf, "%s\n", batostr(&bdaddr));
34} 50}
35 51
36static ssize_t show_flags(struct device *dev, struct device_attribute *attr, char *buf) 52static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
37{ 53{
38 struct hci_dev *hdev = dev_get_drvdata(dev); 54 struct hci_dev *hdev = dev_get_drvdata(dev);
39 return sprintf(buf, "0x%lx\n", hdev->flags); 55 return sprintf(buf, "%d\n", hdev->manufacturer);
56}
57
58static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
59{
60 struct hci_dev *hdev = dev_get_drvdata(dev);
61 return sprintf(buf, "%d\n", hdev->hci_ver);
62}
63
64static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
65{
66 struct hci_dev *hdev = dev_get_drvdata(dev);
67 return sprintf(buf, "%d\n", hdev->hci_rev);
40} 68}
41 69
42static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf) 70static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
@@ -141,10 +169,11 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
141 return count; 169 return count;
142} 170}
143 171
144static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
145static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 172static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
146static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 173static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
147static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); 174static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
175static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
176static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
148static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL); 177static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
149 178
150static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 179static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
@@ -155,10 +184,11 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
155 show_sniff_min_interval, store_sniff_min_interval); 184 show_sniff_min_interval, store_sniff_min_interval);
156 185
157static struct device_attribute *bt_attrs[] = { 186static struct device_attribute *bt_attrs[] = {
158 &dev_attr_name,
159 &dev_attr_type, 187 &dev_attr_type,
160 &dev_attr_address, 188 &dev_attr_address,
161 &dev_attr_flags, 189 &dev_attr_manufacturer,
190 &dev_attr_hci_version,
191 &dev_attr_hci_revision,
162 &dev_attr_inquiry_cache, 192 &dev_attr_inquiry_cache,
163 &dev_attr_idle_timeout, 193 &dev_attr_idle_timeout,
164 &dev_attr_sniff_max_interval, 194 &dev_attr_sniff_max_interval,
@@ -166,6 +196,32 @@ static struct device_attribute *bt_attrs[] = {
166 NULL 196 NULL
167}; 197};
168 198
199static ssize_t show_conn_type(struct device *dev, struct device_attribute *attr, char *buf)
200{
201 struct hci_conn *conn = dev_get_drvdata(dev);
202 return sprintf(buf, "%s\n", conn->type == ACL_LINK ? "ACL" : "SCO");
203}
204
205static ssize_t show_conn_address(struct device *dev, struct device_attribute *attr, char *buf)
206{
207 struct hci_conn *conn = dev_get_drvdata(dev);
208 bdaddr_t bdaddr;
209 baswap(&bdaddr, &conn->dst);
210 return sprintf(buf, "%s\n", batostr(&bdaddr));
211}
212
213#define CONN_ATTR(_name,_mode,_show,_store) \
214struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store)
215
216static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL);
217static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL);
218
219static struct device_attribute *conn_attrs[] = {
220 &conn_attr_type,
221 &conn_attr_address,
222 NULL
223};
224
169struct class *bt_class = NULL; 225struct class *bt_class = NULL;
170EXPORT_SYMBOL_GPL(bt_class); 226EXPORT_SYMBOL_GPL(bt_class);
171 227
@@ -177,8 +233,61 @@ static struct platform_device *bt_platform;
177 233
178static void bt_release(struct device *dev) 234static void bt_release(struct device *dev)
179{ 235{
180 struct hci_dev *hdev = dev_get_drvdata(dev); 236 void *data = dev_get_drvdata(dev);
181 kfree(hdev); 237 kfree(data);
238}
239
240static void add_conn(void *data)
241{
242 struct hci_conn *conn = data;
243 int i;
244
245 if (device_register(&conn->dev) < 0) {
246 BT_ERR("Failed to register connection device");
247 return;
248 }
249
250 for (i = 0; conn_attrs[i]; i++)
251 if (device_create_file(&conn->dev, conn_attrs[i]) < 0)
252 BT_ERR("Failed to create connection attribute");
253}
254
255void hci_conn_add_sysfs(struct hci_conn *conn)
256{
257 struct hci_dev *hdev = conn->hdev;
258 bdaddr_t *ba = &conn->dst;
259
260 BT_DBG("conn %p", conn);
261
262 conn->dev.parent = &hdev->dev;
263 conn->dev.release = bt_release;
264
265 snprintf(conn->dev.bus_id, BUS_ID_SIZE,
266 "%s%2.2X%2.2X%2.2X%2.2X%2.2X%2.2X",
267 conn->type == ACL_LINK ? "acl" : "sco",
268 ba->b[5], ba->b[4], ba->b[3],
269 ba->b[2], ba->b[1], ba->b[0]);
270
271 dev_set_drvdata(&conn->dev, conn);
272
273 INIT_WORK(&conn->work, add_conn, (void *) conn);
274
275 schedule_work(&conn->work);
276}
277
278static void del_conn(void *data)
279{
280 struct hci_conn *conn = data;
281 device_del(&conn->dev);
282}
283
284void hci_conn_del_sysfs(struct hci_conn *conn)
285{
286 BT_DBG("conn %p", conn);
287
288 INIT_WORK(&conn->work, del_conn, (void *) conn);
289
290 schedule_work(&conn->work);
182} 291}
183 292
184int hci_register_sysfs(struct hci_dev *hdev) 293int hci_register_sysfs(struct hci_dev *hdev)
@@ -190,11 +299,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
190 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 299 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
191 300
192 dev->class = bt_class; 301 dev->class = bt_class;
193 302 dev->parent = hdev->parent;
194 if (hdev->parent)
195 dev->parent = hdev->parent;
196 else
197 dev->parent = &bt_platform->dev;
198 303
199 strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE); 304 strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE);
200 305
@@ -207,18 +312,17 @@ int hci_register_sysfs(struct hci_dev *hdev)
207 return err; 312 return err;
208 313
209 for (i = 0; bt_attrs[i]; i++) 314 for (i = 0; bt_attrs[i]; i++)
210 device_create_file(dev, bt_attrs[i]); 315 if (device_create_file(dev, bt_attrs[i]) < 0)
316 BT_ERR("Failed to create device attribute");
211 317
212 return 0; 318 return 0;
213} 319}
214 320
215void hci_unregister_sysfs(struct hci_dev *hdev) 321void hci_unregister_sysfs(struct hci_dev *hdev)
216{ 322{
217 struct device *dev = &hdev->dev;
218
219 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 323 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
220 324
221 device_del(dev); 325 device_del(&hdev->dev);
222} 326}
223 327
224int __init bt_sysfs_init(void) 328int __init bt_sysfs_init(void)
@@ -245,7 +349,7 @@ int __init bt_sysfs_init(void)
245 return 0; 349 return 0;
246} 350}
247 351
248void __exit bt_sysfs_cleanup(void) 352void bt_sysfs_cleanup(void)
249{ 353{
250 class_destroy(bt_class); 354 class_destroy(bt_class);
251 355
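hci_conn_add_sysfs() and hci_conn_del_sysfs() above do not register or delete the connection device inline; they package the operation with INIT_WORK() and hand it to schedule_work(), presumably because device registration may sleep while connections are created and destroyed in atomic context. The userspace sketch below shows only that defer-to-a-worker shape, with a pthread worker and a hand-rolled queue standing in for the kernel workqueue.

/* The caller only queues a request; the worker does the part that is
 * allowed to sleep.  Queue and names are illustrative. */
#include <stdio.h>
#include <pthread.h>

struct work {
    void (*fn)(void *);
    void *arg;
    struct work *next;
};

static struct work *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

/* cheap and non-sleeping: safe to call from the fast path */
static void schedule_work_item(struct work *w)
{
    pthread_mutex_lock(&queue_lock);
    w->next = queue_head;
    queue_head = w;
    pthread_cond_signal(&queue_cond);
    pthread_mutex_unlock(&queue_lock);
}

/* the worker runs in ordinary context and may block freely */
static void *worker(void *unused)
{
    struct work *w;

    (void)unused;
    pthread_mutex_lock(&queue_lock);
    while (!queue_head)
        pthread_cond_wait(&queue_cond, &queue_lock);
    w = queue_head;
    queue_head = w->next;
    pthread_mutex_unlock(&queue_lock);

    w->fn(w->arg);
    return NULL;                        /* handles one item, to keep it short */
}

static void add_conn_device(void *arg)
{
    printf("registering sysfs device %s\n", (const char *)arg);
}

int main(void)
{
    static char name[] = "acl0011223344";
    pthread_t tid;
    struct work w = { add_conn_device, name, NULL };

    pthread_create(&tid, NULL, worker, NULL);
    schedule_work_item(&w);
    pthread_join(tid, NULL);
    return 0;
}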
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c6e3a2c27c6e..66782010f82c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -40,6 +40,7 @@
40#include <linux/input.h> 40#include <linux/input.h>
41 41
42#include <net/bluetooth/bluetooth.h> 42#include <net/bluetooth/bluetooth.h>
43#include <net/bluetooth/hci_core.h>
43#include <net/bluetooth/l2cap.h> 44#include <net/bluetooth/l2cap.h>
44 45
45#include "hidp.h" 46#include "hidp.h"
@@ -506,14 +507,12 @@ static int hidp_session(void *arg)
506 507
507 hidp_del_timer(session); 508 hidp_del_timer(session);
508 509
509 if (intr_sk->sk_state != BT_CONNECTED) 510 fput(session->intr_sock->file);
510 wait_event_timeout(*(ctrl_sk->sk_sleep), (ctrl_sk->sk_state == BT_CLOSED), HZ);
511
512 fput(session->ctrl_sock->file);
513 511
514 wait_event_timeout(*(intr_sk->sk_sleep), (intr_sk->sk_state == BT_CLOSED), HZ); 512 wait_event_timeout(*(ctrl_sk->sk_sleep),
513 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
515 514
516 fput(session->intr_sock->file); 515 fput(session->ctrl_sock->file);
517 516
518 __hidp_unlink_session(session); 517 __hidp_unlink_session(session);
519 518
@@ -528,6 +527,24 @@ static int hidp_session(void *arg)
528 return 0; 527 return 0;
529} 528}
530 529
530static struct device *hidp_get_device(struct hidp_session *session)
531{
532 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
533 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
534 struct hci_dev *hdev;
535 struct hci_conn *conn;
536
537 hdev = hci_get_route(dst, src);
538 if (!hdev)
539 return NULL;
540
541 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
542
543 hci_dev_put(hdev);
544
545 return conn ? &conn->dev : NULL;
546}
547
531static inline void hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) 548static inline void hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req)
532{ 549{
533 struct input_dev *input = session->input; 550 struct input_dev *input = session->input;
@@ -566,6 +583,8 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
566 input->relbit[0] |= BIT(REL_WHEEL); 583 input->relbit[0] |= BIT(REL_WHEEL);
567 } 584 }
568 585
586 input->cdev.dev = hidp_get_device(session);
587
569 input->event = hidp_input_event; 588 input->event = hidp_input_event;
570 589
571 input_register_device(input); 590 input_register_device(input);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 099646e4e2ef..407fba43c1b9 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/compat.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include "hidp.h" 41#include "hidp.h"
@@ -143,11 +144,88 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
143 return -EINVAL; 144 return -EINVAL;
144} 145}
145 146
147#ifdef CONFIG_COMPAT
148struct compat_hidp_connadd_req {
149 int ctrl_sock; // Connected control socket
150 int intr_sock; // Connteted interrupt socket
151 __u16 parser;
152 __u16 rd_size;
153 compat_uptr_t rd_data;
154 __u8 country;
155 __u8 subclass;
156 __u16 vendor;
157 __u16 product;
158 __u16 version;
159 __u32 flags;
160 __u32 idle_to;
161 char name[128];
162};
163
164static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
165{
166 if (cmd == HIDPGETCONNLIST) {
167 struct hidp_connlist_req cl;
168 uint32_t uci;
169 int err;
170
171 if (get_user(cl.cnum, (uint32_t __user *) arg) ||
172 get_user(uci, (u32 __user *) (arg + 4)))
173 return -EFAULT;
174
175 cl.ci = compat_ptr(uci);
176
177 if (cl.cnum <= 0)
178 return -EINVAL;
179
180 err = hidp_get_connlist(&cl);
181
182 if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
183 err = -EFAULT;
184
185 return err;
186 } else if (cmd == HIDPCONNADD) {
187 struct compat_hidp_connadd_req ca;
188 struct hidp_connadd_req __user *uca;
189
190 uca = compat_alloc_user_space(sizeof(*uca));
191
192 if (copy_from_user(&ca, (void *) arg, sizeof(ca)))
193 return -EFAULT;
194
195 if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
196 put_user(ca.intr_sock, &uca->intr_sock) ||
197 put_user(ca.parser, &uca->parser) ||
198 put_user(ca.rd_size, &uca->parser) ||
199 put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
200 put_user(ca.country, &uca->country) ||
201 put_user(ca.subclass, &uca->subclass) ||
202 put_user(ca.vendor, &uca->vendor) ||
203 put_user(ca.product, &uca->product) ||
204 put_user(ca.version, &uca->version) ||
205 put_user(ca.flags, &uca->flags) ||
206 put_user(ca.idle_to, &uca->idle_to) ||
207 copy_to_user(&uca->name[0], &ca.name[0], 128))
208 return -EFAULT;
209
210 arg = (unsigned long) uca;
211
212 /* Fall through. We don't actually write back any _changes_
213 to the structure anyway, so there's no need to copy back
214 into the original compat version */
215 }
216
217 return hidp_sock_ioctl(sock, cmd, arg);
218}
219#endif
220
146static const struct proto_ops hidp_sock_ops = { 221static const struct proto_ops hidp_sock_ops = {
147 .family = PF_BLUETOOTH, 222 .family = PF_BLUETOOTH,
148 .owner = THIS_MODULE, 223 .owner = THIS_MODULE,
149 .release = hidp_sock_release, 224 .release = hidp_sock_release,
150 .ioctl = hidp_sock_ioctl, 225 .ioctl = hidp_sock_ioctl,
226#ifdef CONFIG_COMPAT
227 .compat_ioctl = hidp_sock_compat_ioctl,
228#endif
151 .bind = sock_no_bind, 229 .bind = sock_no_bind,
152 .getname = sock_no_getname, 230 .getname = sock_no_getname,
153 .sendmsg = sock_no_sendmsg, 231 .sendmsg = sock_no_sendmsg,
@@ -178,7 +256,7 @@ static int hidp_sock_create(struct socket *sock, int protocol)
178 if (sock->type != SOCK_RAW) 256 if (sock->type != SOCK_RAW)
179 return -ESOCKTNOSUPPORT; 257 return -ESOCKTNOSUPPORT;
180 258
181 sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &hidp_proto, 1); 259 sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1);
182 if (!sk) 260 if (!sk)
183 return -ENOMEM; 261 return -ENOMEM;
184 262
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index d56f60b392ac..2b3dcb8f90fa 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -559,7 +559,7 @@ static int l2cap_sock_create(struct socket *sock, int protocol)
559 559
560 sock->ops = &l2cap_sock_ops; 560 sock->ops = &l2cap_sock_ops;
561 561
562 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL); 562 sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
563 if (!sk) 563 if (!sk)
564 return -ENOMEM; 564 return -ENOMEM;
565 565
@@ -2216,7 +2216,8 @@ static int __init l2cap_init(void)
2216 goto error; 2216 goto error;
2217 } 2217 }
2218 2218
2219 class_create_file(bt_class, &class_attr_l2cap); 2219 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2220 BT_ERR("Failed to create L2CAP info file");
2220 2221
2221 BT_INFO("L2CAP ver %s", VERSION); 2222 BT_INFO("L2CAP ver %s", VERSION);
2222 BT_INFO("L2CAP socket layer initialized"); 2223 BT_INFO("L2CAP socket layer initialized");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 332dd8f436ea..ddc4e9d5963e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -644,7 +644,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
644 addr.l2_family = AF_BLUETOOTH; 644 addr.l2_family = AF_BLUETOOTH;
645 addr.l2_psm = htobs(RFCOMM_PSM); 645 addr.l2_psm = htobs(RFCOMM_PSM);
646 *err = sock->ops->connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); 646 *err = sock->ops->connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
647 if (*err == 0 || *err == -EAGAIN) 647 if (*err == 0 || *err == -EINPROGRESS)
648 return s; 648 return s;
649 649
650 rfcomm_session_del(s); 650 rfcomm_session_del(s);
@@ -2058,7 +2058,8 @@ static int __init rfcomm_init(void)
2058 2058
2059 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); 2059 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL);
2060 2060
2061 class_create_file(bt_class, &class_attr_rfcomm_dlc); 2061 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
2062 BT_ERR("Failed to create RFCOMM info file");
2062 2063
2063 rfcomm_init_sockets(); 2064 rfcomm_init_sockets();
2064 2065
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 220fee04e7f2..544d65b7baa7 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -336,7 +336,8 @@ static int rfcomm_sock_create(struct socket *sock, int protocol)
336 336
337 sock->ops = &rfcomm_sock_ops; 337 sock->ops = &rfcomm_sock_ops;
338 338
339 if (!(sk = rfcomm_sock_alloc(sock, protocol, GFP_KERNEL))) 339 sk = rfcomm_sock_alloc(sock, protocol, GFP_ATOMIC);
340 if (!sk)
340 return -ENOMEM; 341 return -ENOMEM;
341 342
342 rfcomm_sock_init(sk, NULL); 343 rfcomm_sock_init(sk, NULL);
@@ -944,7 +945,8 @@ int __init rfcomm_init_sockets(void)
944 if (err < 0) 945 if (err < 0)
945 goto error; 946 goto error;
946 947
947 class_create_file(bt_class, &class_attr_rfcomm); 948 if (class_create_file(bt_class, &class_attr_rfcomm) < 0)
949 BT_ERR("Failed to create RFCOMM info file");
948 950
949 BT_INFO("RFCOMM socket layer initialized"); 951 BT_INFO("RFCOMM socket layer initialized");
950 952
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index bd8d671a0ba6..b8e3a5f1c8a8 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -38,6 +38,7 @@
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39 39
40#include <net/bluetooth/bluetooth.h> 40#include <net/bluetooth/bluetooth.h>
41#include <net/bluetooth/hci_core.h>
41#include <net/bluetooth/rfcomm.h> 42#include <net/bluetooth/rfcomm.h>
42 43
43#ifndef CONFIG_BT_RFCOMM_DEBUG 44#ifndef CONFIG_BT_RFCOMM_DEBUG
@@ -161,6 +162,22 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
161 return dev; 162 return dev;
162} 163}
163 164
165static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
166{
167 struct hci_dev *hdev;
168 struct hci_conn *conn;
169
170 hdev = hci_get_route(&dev->dst, &dev->src);
171 if (!hdev)
172 return NULL;
173
174 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
175
176 hci_dev_put(hdev);
177
178 return conn ? &conn->dev : NULL;
179}
180
164static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 181static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
165{ 182{
166 struct rfcomm_dev *dev; 183 struct rfcomm_dev *dev;
@@ -244,7 +261,7 @@ out:
244 return err; 261 return err;
245 } 262 }
246 263
247 tty_register_device(rfcomm_tty_driver, dev->id, NULL); 264 tty_register_device(rfcomm_tty_driver, dev->id, rfcomm_get_device(dev));
248 265
249 return dev->id; 266 return dev->id;
250} 267}
@@ -748,6 +765,9 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct termios *old)
748 765
749 BT_DBG("tty %p termios %p", tty, old); 766 BT_DBG("tty %p termios %p", tty, old);
750 767
768 if (!dev)
769 return;
770
751 /* Handle turning off CRTSCTS */ 771 /* Handle turning off CRTSCTS */
752 if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) 772 if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS))
753 BT_DBG("Turning off CRTSCTS unsupported"); 773 BT_DBG("Turning off CRTSCTS unsupported");
@@ -992,7 +1012,7 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsign
992 1012
993/* ---- TTY structure ---- */ 1013/* ---- TTY structure ---- */
994 1014
995static struct tty_operations rfcomm_ops = { 1015static const struct tty_operations rfcomm_ops = {
996 .open = rfcomm_tty_open, 1016 .open = rfcomm_tty_open,
997 .close = rfcomm_tty_close, 1017 .close = rfcomm_tty_close,
998 .write = rfcomm_tty_write, 1018 .write = rfcomm_tty_write,
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 7714a2ec3854..5d13d4f31753 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -452,7 +452,8 @@ static int sco_sock_create(struct socket *sock, int protocol)
452 452
453 sock->ops = &sco_sock_ops; 453 sock->ops = &sco_sock_ops;
454 454
455 if (!(sk = sco_sock_alloc(sock, protocol, GFP_KERNEL))) 455 sk = sco_sock_alloc(sock, protocol, GFP_ATOMIC);
456 if (!sk)
456 return -ENOMEM; 457 return -ENOMEM;
457 458
458 sco_sock_init(sk, NULL); 459 sco_sock_init(sk, NULL);
@@ -967,7 +968,8 @@ static int __init sco_init(void)
967 goto error; 968 goto error;
968 } 969 }
969 970
970 class_create_file(bt_class, &class_attr_sco); 971 if (class_create_file(bt_class, &class_attr_sco) < 0)
972 BT_ERR("Failed to create SCO info file");
971 973
972 BT_INFO("SCO (Voice Link) ver %s", VERSION); 974 BT_INFO("SCO (Voice Link) ver %s", VERSION);
973 BT_INFO("SCO socket layer initialized"); 975 BT_INFO("SCO socket layer initialized");
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 3a73b8c94271..d9f04864d15d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,10 @@ void br_fdb_cleanup(unsigned long _data)
128 mod_timer(&br->gc_timer, jiffies + HZ/10); 128 mod_timer(&br->gc_timer, jiffies + HZ/10);
129} 129}
130 130
131void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p) 131
132void br_fdb_delete_by_port(struct net_bridge *br,
133 const struct net_bridge_port *p,
134 int do_all)
132{ 135{
133 int i; 136 int i;
134 137
@@ -142,6 +145,8 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
142 if (f->dst != p) 145 if (f->dst != p)
143 continue; 146 continue;
144 147
148 if (f->is_static && !do_all)
149 continue;
145 /* 150 /*
146 * if multiple ports all have the same device address 151 * if multiple ports all have the same device address
147 * then when one port is deleted, assign 152 * then when one port is deleted, assign
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b1211d5342f6..f753c40c11d2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -163,7 +163,7 @@ static void del_nbp(struct net_bridge_port *p)
163 br_stp_disable_port(p); 163 br_stp_disable_port(p);
164 spin_unlock_bh(&br->lock); 164 spin_unlock_bh(&br->lock);
165 165
166 br_fdb_delete_by_port(br, p); 166 br_fdb_delete_by_port(br, p, 1);
167 167
168 list_del_rcu(&p->list); 168 list_del_rcu(&p->list);
169 169
@@ -448,7 +448,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
448 448
449 return 0; 449 return 0;
450err2: 450err2:
451 br_fdb_delete_by_port(br, p); 451 br_fdb_delete_by_port(br, p, 1);
452err1: 452err1:
453 kobject_del(&p->kobj); 453 kobject_del(&p->kobj);
454err0: 454err0:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c491fb2f280e..74258d86f256 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -143,7 +143,7 @@ extern void br_fdb_changeaddr(struct net_bridge_port *p,
143 const unsigned char *newaddr); 143 const unsigned char *newaddr);
144extern void br_fdb_cleanup(unsigned long arg); 144extern void br_fdb_cleanup(unsigned long arg);
145extern void br_fdb_delete_by_port(struct net_bridge *br, 145extern void br_fdb_delete_by_port(struct net_bridge *br,
146 struct net_bridge_port *p); 146 const struct net_bridge_port *p, int do_all);
147extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 147extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
148 const unsigned char *addr); 148 const unsigned char *addr);
149extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br, 149extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 14cd025079af..d294224592db 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -113,6 +113,8 @@ void br_stp_disable_port(struct net_bridge_port *p)
113 del_timer(&p->forward_delay_timer); 113 del_timer(&p->forward_delay_timer);
114 del_timer(&p->hold_timer); 114 del_timer(&p->hold_timer);
115 115
116 br_fdb_delete_by_port(br, p, 0);
117
116 br_configuration_update(br); 118 br_configuration_update(br);
117 119
118 br_port_state_selection(br); 120 br_port_state_selection(br);
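The bridge hunks above give br_fdb_delete_by_port() a do_all flag: tearing a port out of the bridge (del_nbp and the br_add_if error path) still flushes everything, while the new call from br_stp_disable_port() passes 0 so statically configured entries survive a plain STP disable. The sketch below models that filter over a small array; the table layout is illustrative.

/* Flush learned entries for a port; keep static ones unless do_all. */
#include <stdio.h>

struct fdb_entry {
    int port;
    int is_static;
    int in_use;
};

static struct fdb_entry fdb[4] = {
    { 1, 0, 1 },    /* learned on port 1 */
    { 1, 1, 1 },    /* static on port 1 */
    { 2, 0, 1 },    /* learned on port 2 */
};

static void fdb_delete_by_port(int port, int do_all)
{
    int i;

    for (i = 0; i < 4; i++) {
        if (!fdb[i].in_use || fdb[i].port != port)
            continue;
        if (fdb[i].is_static && !do_all)
            continue;           /* keep static entries on a simple disable */
        fdb[i].in_use = 0;
    }
}

static int count_port(int port)
{
    int i, n = 0;

    for (i = 0; i < 4; i++)
        if (fdb[i].in_use && fdb[i].port == port)
            n++;
    return n;
}

int main(void)
{
    fdb_delete_by_port(1, 0);   /* STP disable: the static entry survives */
    printf("after disable, port 1 entries: %d\n", count_port(1));

    fdb_delete_by_port(1, 1);   /* port removed from the bridge: flush all */
    printf("after removal, port 1 entries: %d\n", count_port(1));
    return 0;
}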
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index d19fc4b328dc..0aa7b9910a86 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -20,7 +20,7 @@ static int ebt_target_reply(struct sk_buff **pskb, unsigned int hooknr,
20 const void *data, unsigned int datalen) 20 const void *data, unsigned int datalen)
21{ 21{
22 struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data; 22 struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data;
23 u32 _sip, *siptr, _dip, *diptr; 23 __be32 _sip, *siptr, _dip, *diptr;
24 struct arphdr _ah, *ap; 24 struct arphdr _ah, *ap;
25 unsigned char _sha[ETH_ALEN], *shp; 25 unsigned char _sha[ETH_ALEN], *shp;
26 struct sk_buff *skb = *pskb; 26 struct sk_buff *skb = *pskb;
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 770c0df972a3..b54306a934e5 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -22,24 +22,37 @@ static int ebt_target_mark(struct sk_buff **pskb, unsigned int hooknr,
22 const void *data, unsigned int datalen) 22 const void *data, unsigned int datalen)
23{ 23{
24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data; 24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
25 int action = info->target & -16;
25 26
26 if ((*pskb)->nfmark != info->mark) 27 if (action == MARK_SET_VALUE)
27 (*pskb)->nfmark = info->mark; 28 (*pskb)->nfmark = info->mark;
29 else if (action == MARK_OR_VALUE)
30 (*pskb)->nfmark |= info->mark;
31 else if (action == MARK_AND_VALUE)
32 (*pskb)->nfmark &= info->mark;
33 else
34 (*pskb)->nfmark ^= info->mark;
28 35
29 return info->target; 36 return info->target | -16;
30} 37}
31 38
32static int ebt_target_mark_check(const char *tablename, unsigned int hookmask, 39static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
33 const struct ebt_entry *e, void *data, unsigned int datalen) 40 const struct ebt_entry *e, void *data, unsigned int datalen)
34{ 41{
35 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data; 42 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
43 int tmp;
36 44
37 if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info))) 45 if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info)))
38 return -EINVAL; 46 return -EINVAL;
39 if (BASE_CHAIN && info->target == EBT_RETURN) 47 tmp = info->target | -16;
48 if (BASE_CHAIN && tmp == EBT_RETURN)
40 return -EINVAL; 49 return -EINVAL;
41 CLEAR_BASE_CHAIN_BIT; 50 CLEAR_BASE_CHAIN_BIT;
42 if (INVALID_TARGET) 51 if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
52 return -EINVAL;
53 tmp = info->target & -16;
54 if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
55 tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
43 return -EINVAL; 56 return -EINVAL;
44 return 0; 57 return 0;
45} 58}
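The rewritten ebt_mark target packs two values into info->target: the low four bits carry the ebtables verdict and the remaining bits select the mark operation, which is why the module masks with -16 to recover the action and ORs with -16 to recover the verdict. A runnable sketch of that packing; the constants are illustrative stand-ins that follow the ebtables convention (verdicts are small negatives, MARK_* actions are multiples of 16), not the exact header values:

#include <stdio.h>

/* illustrative values only; the real ones live under include/linux/netfilter_bridge/ */
#define EBT_CONTINUE     (-3)
#define MARK_SET_VALUE   (-16)   /* 0xfffffff0 */
#define MARK_OR_VALUE    (-32)   /* 0xffffffe0 */

static int pack_target(int action, int verdict)
{
    return action | (verdict & 0x0f);        /* action in the high bits, verdict in the low nibble */
}

int main(void)
{
    int target  = pack_target(MARK_OR_VALUE, EBT_CONTINUE);
    int action  = target & -16;              /* what ebt_target_mark() extracts */
    int verdict = target | -16;              /* recovered verdict, still a small negative */

    printf("target=%d action=%d verdict=%d\n", target, action, verdict);
    return 0;
}

The check routine validates both halves independently, which is exactly the shape of the new ebt_target_mark_check().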
diff --git a/net/compat.c b/net/compat.c
index d5d69fa15d07..52d32f1bc728 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -285,8 +285,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
285 285
286 if (i > 0) { 286 if (i > 0) {
287 int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); 287 int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
288 if (!err) 288 err = put_user(SOL_SOCKET, &cm->cmsg_level);
289 err = put_user(SOL_SOCKET, &cm->cmsg_level);
290 if (!err) 289 if (!err)
291 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 290 err = put_user(SCM_RIGHTS, &cm->cmsg_type);
292 if (!err) 291 if (!err)
diff --git a/net/core/dev.c b/net/core/dev.c
index 14de297d024d..81c426adcd1e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1480,14 +1480,16 @@ gso:
1480 if (q->enqueue) { 1480 if (q->enqueue) {
1481 /* Grab device queue */ 1481 /* Grab device queue */
1482 spin_lock(&dev->queue_lock); 1482 spin_lock(&dev->queue_lock);
1483 q = dev->qdisc;
1484 if (q->enqueue) {
1485 rc = q->enqueue(skb, q);
1486 qdisc_run(dev);
1487 spin_unlock(&dev->queue_lock);
1483 1488
1484 rc = q->enqueue(skb, q); 1489 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1485 1490 goto out;
1486 qdisc_run(dev); 1491 }
1487
1488 spin_unlock(&dev->queue_lock); 1492 spin_unlock(&dev->queue_lock);
1489 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1490 goto out;
1491 } 1493 }
1492 1494
1493 /* The device has no queue. Common case for software devices: 1495 /* The device has no queue. Common case for software devices:
@@ -3500,8 +3502,6 @@ static int __init net_dev_init(void)
3500 3502
3501 BUG_ON(!dev_boot_phase); 3503 BUG_ON(!dev_boot_phase);
3502 3504
3503 net_random_init();
3504
3505 if (dev_proc_init()) 3505 if (dev_proc_init())
3506 goto out; 3506 goto out;
3507 3507
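The first dev.c hunk re-reads dev->qdisc after taking dev->queue_lock and only enqueues if the qdisc still has an enqueue method, since the qdisc observed before the lock may have been swapped out in the meantime (for example while the device is being deactivated). A runnable pthread sketch of that check, then re-check under the lock, idiom; all names are invented for the illustration and are not kernel APIs:

#include <pthread.h>
#include <stdio.h>

struct queue { int (*enqueue)(int pkt); };

static int real_enqueue(int pkt) { printf("enqueued %d\n", pkt); return 0; }

static struct queue active = { real_enqueue };
static struct queue *current_q = &active;          /* may be swapped concurrently */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static int xmit(int pkt)
{
    struct queue *q = current_q;                   /* unlocked peek, may go stale */

    if (q->enqueue) {
        pthread_mutex_lock(&queue_lock);
        q = current_q;                             /* re-read under the lock */
        if (q->enqueue) {
            int rc = q->enqueue(pkt);
            pthread_mutex_unlock(&queue_lock);
            return rc;
        }
        pthread_mutex_unlock(&queue_lock);
    }
    return -1;                                     /* fall through to the no-queue path */
}

int main(void) { return xmit(42); }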
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e0ca04f38cef..87dc556fd9d6 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -806,13 +806,6 @@ int dev_ethtool(struct ifreq *ifr)
806 int rc; 806 int rc;
807 unsigned long old_features; 807 unsigned long old_features;
808 808
809 /*
810 * XXX: This can be pushed down into the ethtool_* handlers that
811 * need it. Keep existing behaviour for the moment.
812 */
813 if (!capable(CAP_NET_ADMIN))
814 return -EPERM;
815
816 if (!dev || !netif_device_present(dev)) 809 if (!dev || !netif_device_present(dev))
817 return -ENODEV; 810 return -ENODEV;
818 811
@@ -822,6 +815,27 @@ int dev_ethtool(struct ifreq *ifr)
822 if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) 815 if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
823 return -EFAULT; 816 return -EFAULT;
824 817
818 /* Allow some commands to be done by anyone */
819 switch(ethcmd) {
820 case ETHTOOL_GDRVINFO:
821 case ETHTOOL_GMSGLVL:
822 case ETHTOOL_GCOALESCE:
823 case ETHTOOL_GRINGPARAM:
824 case ETHTOOL_GPAUSEPARAM:
825 case ETHTOOL_GRXCSUM:
826 case ETHTOOL_GTXCSUM:
827 case ETHTOOL_GSG:
828 case ETHTOOL_GSTRINGS:
829 case ETHTOOL_GTSO:
830 case ETHTOOL_GPERMADDR:
831 case ETHTOOL_GUFO:
832 case ETHTOOL_GGSO:
833 break;
834 default:
835 if (!capable(CAP_NET_ADMIN))
836 return -EPERM;
837 }
838
825 if(dev->ethtool_ops->begin) 839 if(dev->ethtool_ops->begin)
826 if ((rc = dev->ethtool_ops->begin(dev)) < 0) 840 if ((rc = dev->ethtool_ops->begin(dev)) < 0)
827 return rc; 841 return rc;
@@ -947,6 +961,10 @@ int dev_ethtool(struct ifreq *ifr)
947 return rc; 961 return rc;
948 962
949 ioctl: 963 ioctl:
964 /* Keep existing behaviour for the moment. */
965 if (!capable(CAP_NET_ADMIN))
966 return -EPERM;
967
950 if (dev->do_ioctl) 968 if (dev->do_ioctl)
951 return dev->do_ioctl(dev, ifr, SIOCETHTOOL); 969 return dev->do_ioctl(dev, ifr, SIOCETHTOOL);
952 return -EOPNOTSUPP; 970 return -EOPNOTSUPP;
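The ethtool change moves the CAP_NET_ADMIN test below the command decode: a whitelist of read-only ETHTOOL_G* commands becomes available to unprivileged callers, while every state-changing command and the driver ioctl fallback keep the capability check. A stand-alone sketch of the same gating shape; the command names and the stubbed capability check are illustrative, not the ethtool API:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

enum cmd { GET_DRVINFO, GET_RINGPARAM, SET_RINGPARAM, SET_PAUSEPARAM };

static bool capable_net_admin(void) { return false; }   /* pretend we are unprivileged */

static int dispatch(enum cmd c)
{
    switch (c) {
    case GET_DRVINFO:          /* read-only commands: no privilege needed */
    case GET_RINGPARAM:
        break;
    default:                   /* anything that changes state stays privileged */
        if (!capable_net_admin())
            return -EPERM;
    }
    printf("running command %d\n", c);
    return 0;
}

int main(void)
{
    int ro = dispatch(GET_DRVINFO);      /* allowed */
    int rw = dispatch(SET_RINGPARAM);    /* rejected with -EPERM */
    printf("ro=%d rw=%d\n", ro, rw);
    return 0;
}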
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index a99d87d82b7f..6b0e63cacd93 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -8,7 +8,6 @@
8 * Authors: Thomas Graf <tgraf@suug.ch> 8 * Authors: Thomas Graf <tgraf@suug.ch>
9 */ 9 */
10 10
11#include <linux/config.h>
12#include <linux/types.h> 11#include <linux/types.h>
13#include <linux/kernel.h> 12#include <linux/kernel.h>
14#include <linux/list.h> 13#include <linux/list.h>
diff --git a/net/core/flow.c b/net/core/flow.c
index f23e7e386543..b16d31ae5e54 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -85,6 +85,14 @@ static void flow_cache_new_hashrnd(unsigned long arg)
85 add_timer(&flow_hash_rnd_timer); 85 add_timer(&flow_hash_rnd_timer);
86} 86}
87 87
88static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
89{
90 if (fle->object)
91 atomic_dec(fle->object_ref);
92 kmem_cache_free(flow_cachep, fle);
93 flow_count(cpu)--;
94}
95
88static void __flow_cache_shrink(int cpu, int shrink_to) 96static void __flow_cache_shrink(int cpu, int shrink_to)
89{ 97{
90 struct flow_cache_entry *fle, **flp; 98 struct flow_cache_entry *fle, **flp;
@@ -100,10 +108,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
100 } 108 }
101 while ((fle = *flp) != NULL) { 109 while ((fle = *flp) != NULL) {
102 *flp = fle->next; 110 *flp = fle->next;
103 if (fle->object) 111 flow_entry_kill(cpu, fle);
104 atomic_dec(fle->object_ref);
105 kmem_cache_free(flow_cachep, fle);
106 flow_count(cpu)--;
107 } 112 }
108 } 113 }
109} 114}
@@ -220,24 +225,33 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
220 225
221nocache: 226nocache:
222 { 227 {
228 int err;
223 void *obj; 229 void *obj;
224 atomic_t *obj_ref; 230 atomic_t *obj_ref;
225 231
226 resolver(key, family, dir, &obj, &obj_ref); 232 err = resolver(key, family, dir, &obj, &obj_ref);
227 233
228 if (fle) { 234 if (fle) {
229 fle->genid = atomic_read(&flow_cache_genid); 235 if (err) {
230 236 /* Force security policy check on next lookup */
231 if (fle->object) 237 *head = fle->next;
232 atomic_dec(fle->object_ref); 238 flow_entry_kill(cpu, fle);
233 239 } else {
234 fle->object = obj; 240 fle->genid = atomic_read(&flow_cache_genid);
235 fle->object_ref = obj_ref; 241
236 if (obj) 242 if (fle->object)
237 atomic_inc(fle->object_ref); 243 atomic_dec(fle->object_ref);
244
245 fle->object = obj;
246 fle->object_ref = obj_ref;
247 if (obj)
248 atomic_inc(fle->object_ref);
249 }
238 } 250 }
239 local_bh_enable(); 251 local_bh_enable();
240 252
253 if (err)
254 obj = ERR_PTR(err);
241 return obj; 255 return obj;
242 } 256 }
243} 257}
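flow_cache_lookup() now inspects the resolver's return code: on failure the half-built entry is unlinked and freed through the new flow_entry_kill() helper, and the error is handed back as an ERR_PTR so the next lookup repeats the policy check. A user-space sketch of the ERR_PTR/IS_ERR convention the error path relies on; the three macros are simplified versions of the kernel helpers, written out here only for illustration:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* simplified versions of the kernel's error-pointer helpers */
#define ERR_PTR(err)   ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)   ((int)(intptr_t)(ptr))
#define IS_ERR(ptr)    ((uintptr_t)(ptr) >= (uintptr_t)-4095)

static void *resolve(int fail)
{
    static int object = 42;
    return fail ? ERR_PTR(-EACCES) : &object;
}

int main(void)
{
    void *obj = resolve(1);

    if (IS_ERR(obj))
        printf("lookup failed: %d\n", PTR_ERR(obj));   /* caller sees -EACCES */
    else
        printf("object = %d\n", *(int *)obj);
    return 0;
}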
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b6c69e1463e8..b4b478353b27 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -344,12 +344,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
344{ 344{
345 struct neighbour *n; 345 struct neighbour *n;
346 int key_len = tbl->key_len; 346 int key_len = tbl->key_len;
347 u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; 347 u32 hash_val = tbl->hash(pkey, dev);
348 348
349 NEIGH_CACHE_STAT_INC(tbl, lookups); 349 NEIGH_CACHE_STAT_INC(tbl, lookups);
350 350
351 read_lock_bh(&tbl->lock); 351 read_lock_bh(&tbl->lock);
352 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 352 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { 353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
354 neigh_hold(n); 354 neigh_hold(n);
355 NEIGH_CACHE_STAT_INC(tbl, hits); 355 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -364,12 +364,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
364{ 364{
365 struct neighbour *n; 365 struct neighbour *n;
366 int key_len = tbl->key_len; 366 int key_len = tbl->key_len;
367 u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask; 367 u32 hash_val = tbl->hash(pkey, NULL);
368 368
369 NEIGH_CACHE_STAT_INC(tbl, lookups); 369 NEIGH_CACHE_STAT_INC(tbl, lookups);
370 370
371 read_lock_bh(&tbl->lock); 371 read_lock_bh(&tbl->lock);
372 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 372 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
373 if (!memcmp(n->primary_key, pkey, key_len)) { 373 if (!memcmp(n->primary_key, pkey, key_len)) {
374 neigh_hold(n); 374 neigh_hold(n);
375 NEIGH_CACHE_STAT_INC(tbl, hits); 375 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -1079,7 +1079,7 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1079} 1079}
1080 1080
1081static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, 1081static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1082 u16 protocol) 1082 __be16 protocol)
1083{ 1083{
1084 struct hh_cache *hh; 1084 struct hh_cache *hh;
1085 struct net_device *dev = dst->dev; 1085 struct net_device *dev = dst->dev;
@@ -1998,12 +1998,12 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1998 int rc, h, s_h = cb->args[1]; 1998 int rc, h, s_h = cb->args[1];
1999 int idx, s_idx = idx = cb->args[2]; 1999 int idx, s_idx = idx = cb->args[2];
2000 2000
2001 read_lock_bh(&tbl->lock);
2001 for (h = 0; h <= tbl->hash_mask; h++) { 2002 for (h = 0; h <= tbl->hash_mask; h++) {
2002 if (h < s_h) 2003 if (h < s_h)
2003 continue; 2004 continue;
2004 if (h > s_h) 2005 if (h > s_h)
2005 s_idx = 0; 2006 s_idx = 0;
2006 read_lock_bh(&tbl->lock);
2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) { 2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2008 if (idx < s_idx) 2008 if (idx < s_idx)
2009 continue; 2009 continue;
@@ -2016,8 +2016,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2016 goto out; 2016 goto out;
2017 } 2017 }
2018 } 2018 }
2019 read_unlock_bh(&tbl->lock);
2020 } 2019 }
2020 read_unlock_bh(&tbl->lock);
2021 rc = skb->len; 2021 rc = skb->len;
2022out: 2022out:
2023 cb->args[1] = h; 2023 cb->args[1] = h;
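The neighbour lookups now keep the full hash value and apply tbl->hash_mask only when indexing the bucket array, presumably so the index is computed against the mask in effect once the read lock is held (the hash table can be resized), and the dump path takes the table lock once around the whole walk instead of per bucket. A small sketch of the keep-the-full-hash, mask-at-index pattern; the multiplicative hash below is arbitrary:

#include <stdio.h>
#include <stdint.h>

static uint32_t hash_key(uint32_t key) { return key * 2654435761u; }   /* any full 32-bit hash */

int main(void)
{
    const char *buckets[8] = { 0 };
    uint32_t mask = 7;                        /* table_size - 1, may change if the table grows */

    uint32_t h = hash_key(0xc0a80001);        /* keep the unmasked hash value... */
    buckets[h & mask] = "entry";              /* ...and mask only when indexing the buckets */

    printf("entry landed in bucket %u\n", h & mask);
    return 0;
}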
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 13472762b18b..f47f319bb7dc 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -344,8 +344,6 @@ static ssize_t wireless_show(struct class_device *cd, char *buf,
344 if(dev->wireless_handlers && 344 if(dev->wireless_handlers &&
345 dev->wireless_handlers->get_wireless_stats) 345 dev->wireless_handlers->get_wireless_stats)
346 iw = dev->wireless_handlers->get_wireless_stats(dev); 346 iw = dev->wireless_handlers->get_wireless_stats(dev);
347 else if (dev->get_wireless_stats)
348 iw = dev->get_wireless_stats(dev);
349 if (iw != NULL) 347 if (iw != NULL)
350 ret = (*format)(iw, buf); 348 ret = (*format)(iw, buf);
351 } 349 }
@@ -465,8 +463,7 @@ int netdev_register_sysfs(struct net_device *net)
465 *groups++ = &netstat_group; 463 *groups++ = &netstat_group;
466 464
467#ifdef WIRELESS_EXT 465#ifdef WIRELESS_EXT
468 if (net->get_wireless_stats 466 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
469 || (net->wireless_handlers && net->wireless_handlers->get_wireless_stats))
470 *groups++ = &wireless_group; 467 *groups++ = &wireless_group;
471#endif 468#endif
472 469
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ead5920c26d6..9308af060b44 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -335,13 +335,13 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
335 memcpy(skb->data, msg, len); 335 memcpy(skb->data, msg, len);
336 skb->len += len; 336 skb->len += len;
337 337
338 udph = (struct udphdr *) skb_push(skb, sizeof(*udph)); 338 skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
339 udph->source = htons(np->local_port); 339 udph->source = htons(np->local_port);
340 udph->dest = htons(np->remote_port); 340 udph->dest = htons(np->remote_port);
341 udph->len = htons(udp_len); 341 udph->len = htons(udp_len);
342 udph->check = 0; 342 udph->check = 0;
343 343
344 iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); 344 skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
345 345
346 /* iph->version = 4; iph->ihl = 5; */ 346 /* iph->version = 4; iph->ihl = 5; */
347 put_unaligned(0x45, (unsigned char *)iph); 347 put_unaligned(0x45, (unsigned char *)iph);
@@ -357,8 +357,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
357 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 357 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
358 358
359 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); 359 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
360 360 skb->mac.raw = skb->data;
361 eth->h_proto = htons(ETH_P_IP); 361 skb->protocol = eth->h_proto = htons(ETH_P_IP);
362 memcpy(eth->h_source, np->local_mac, 6); 362 memcpy(eth->h_source, np->local_mac, 6);
363 memcpy(eth->h_dest, np->remote_mac, 6); 363 memcpy(eth->h_dest, np->remote_mac, 6);
364 364
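The netpoll hunks record skb->h.uh, skb->nh.iph, skb->mac.raw and skb->protocol while the UDP packet is built, and the packet itself is assembled back to front: payload first, then each header pushed in front of the previous one. A plain-buffer sketch of that push order with hand-rolled minimal header structs; the IP header is omitted for brevity and nothing here is the kernel's skb API:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct udp_hdr { uint16_t source, dest, len, check; };
struct eth_hdr { uint8_t dest[6], source[6]; uint16_t proto; };

int main(void)
{
    unsigned char frame[128];                                 /* payload length kept even so casts stay aligned */
    const char *msg = "netpoll test msg";
    size_t msg_len = strlen(msg);

    /* start at the tail and "push" each layer in front of the previous one */
    unsigned char *p = frame + sizeof(frame) - msg_len;
    memcpy(p, msg, msg_len);                                  /* payload */

    p -= sizeof(struct udp_hdr);
    struct udp_hdr *udph = (struct udp_hdr *)p;               /* transport header pointer */
    udph->len = (uint16_t)(sizeof(*udph) + msg_len);

    p -= sizeof(struct eth_hdr);
    struct eth_hdr *eth = (struct eth_hdr *)p;                /* link header pointer */
    eth->proto = 0x0800;                                      /* would be htons(ETH_P_IP) on the wire */

    printf("frame uses %zu bytes, udp len %u\n",
           (size_t)(frame + sizeof(frame) - p), (unsigned)udph->len);
    return 0;
}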
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 72145d4a2600..dd023fd28304 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -109,6 +109,8 @@
109 * 109 *
110 * MPLS support by Steven Whitehouse <steve@chygwyn.com> 110 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
111 * 111 *
112 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
113 *
112 */ 114 */
113#include <linux/sys.h> 115#include <linux/sys.h>
114#include <linux/types.h> 116#include <linux/types.h>
@@ -137,6 +139,7 @@
137#include <linux/inetdevice.h> 139#include <linux/inetdevice.h>
138#include <linux/rtnetlink.h> 140#include <linux/rtnetlink.h>
139#include <linux/if_arp.h> 141#include <linux/if_arp.h>
142#include <linux/if_vlan.h>
140#include <linux/in.h> 143#include <linux/in.h>
141#include <linux/ip.h> 144#include <linux/ip.h>
142#include <linux/ipv6.h> 145#include <linux/ipv6.h>
@@ -157,7 +160,7 @@
157#include <asm/div64.h> /* do_div */ 160#include <asm/div64.h> /* do_div */
158#include <asm/timex.h> 161#include <asm/timex.h>
159 162
160#define VERSION "pktgen v2.67: Packet Generator for packet performance testing.\n" 163#define VERSION "pktgen v2.68: Packet Generator for packet performance testing.\n"
161 164
162/* #define PG_DEBUG(a) a */ 165/* #define PG_DEBUG(a) a */
163#define PG_DEBUG(a) 166#define PG_DEBUG(a)
@@ -178,6 +181,8 @@
178#define F_TXSIZE_RND (1<<6) /* Transmit size is random */ 181#define F_TXSIZE_RND (1<<6) /* Transmit size is random */
179#define F_IPV6 (1<<7) /* Interface in IPV6 Mode */ 182#define F_IPV6 (1<<7) /* Interface in IPV6 Mode */
180#define F_MPLS_RND (1<<8) /* Random MPLS labels */ 183#define F_MPLS_RND (1<<8) /* Random MPLS labels */
184#define F_VID_RND (1<<9) /* Random VLAN ID */
185#define F_SVID_RND (1<<10) /* Random SVLAN ID */
181 186
182/* Thread control flag bits */ 187/* Thread control flag bits */
183#define T_TERMINATE (1<<0) 188#define T_TERMINATE (1<<0)
@@ -198,6 +203,9 @@ static struct proc_dir_entry *pg_proc_dir = NULL;
198 203
199#define MAX_CFLOWS 65536 204#define MAX_CFLOWS 65536
200 205
206#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
207#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)
208
201struct flow_state { 209struct flow_state {
202 __u32 cur_daddr; 210 __u32 cur_daddr;
203 int count; 211 int count;
@@ -284,10 +292,23 @@ struct pktgen_dev {
284 __u16 udp_dst_min; /* inclusive, dest UDP port */ 292 __u16 udp_dst_min; /* inclusive, dest UDP port */
285 __u16 udp_dst_max; /* exclusive, dest UDP port */ 293 __u16 udp_dst_max; /* exclusive, dest UDP port */
286 294
295 /* DSCP + ECN */
296 __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */
297 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */
298
287 /* MPLS */ 299 /* MPLS */
288 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ 300 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */
289 __be32 labels[MAX_MPLS_LABELS]; 301 __be32 labels[MAX_MPLS_LABELS];
290 302
303 /* VLAN/SVLAN (802.1Q/Q-in-Q) */
304 __u8 vlan_p;
305 __u8 vlan_cfi;
306 __u16 vlan_id; /* 0xffff means no vlan tag */
307
308 __u8 svlan_p;
309 __u8 svlan_cfi;
310 __u16 svlan_id; /* 0xffff means no svlan tag */
311
291 __u32 src_mac_count; /* How many MACs to iterate through */ 312 __u32 src_mac_count; /* How many MACs to iterate through */
292 __u32 dst_mac_count; /* How many MACs to iterate through */ 313 __u32 dst_mac_count; /* How many MACs to iterate through */
293 314
@@ -644,6 +665,24 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
644 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 665 i == pkt_dev->nr_labels-1 ? "\n" : ", ");
645 } 666 }
646 667
668 if (pkt_dev->vlan_id != 0xffff) {
669 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n",
670 pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi);
671 }
672
673 if (pkt_dev->svlan_id != 0xffff) {
674 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n",
675 pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi);
676 }
677
678 if (pkt_dev->tos) {
679 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos);
680 }
681
682 if (pkt_dev->traffic_class) {
683 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
684 }
685
647 seq_printf(seq, " Flags: "); 686 seq_printf(seq, " Flags: ");
648 687
649 if (pkt_dev->flags & F_IPV6) 688 if (pkt_dev->flags & F_IPV6)
@@ -673,6 +712,12 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
673 if (pkt_dev->flags & F_MACDST_RND) 712 if (pkt_dev->flags & F_MACDST_RND)
674 seq_printf(seq, "MACDST_RND "); 713 seq_printf(seq, "MACDST_RND ");
675 714
715 if (pkt_dev->flags & F_VID_RND)
716 seq_printf(seq, "VID_RND ");
717
718 if (pkt_dev->flags & F_SVID_RND)
719 seq_printf(seq, "SVID_RND ");
720
676 seq_puts(seq, "\n"); 721 seq_puts(seq, "\n");
677 722
678 sa = pkt_dev->started_at; 723 sa = pkt_dev->started_at;
@@ -715,12 +760,12 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
715} 760}
716 761
717 762
718static int hex32_arg(const char __user *user_buffer, __u32 *num) 763static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num)
719{ 764{
720 int i = 0; 765 int i = 0;
721 *num = 0; 766 *num = 0;
722 767
723 for(; i < 8; i++) { 768 for(; i < maxlen; i++) {
724 char c; 769 char c;
725 *num <<= 4; 770 *num <<= 4;
726 if (get_user(c, &user_buffer[i])) 771 if (get_user(c, &user_buffer[i]))
@@ -815,7 +860,7 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
815 pkt_dev->nr_labels = 0; 860 pkt_dev->nr_labels = 0;
816 do { 861 do {
817 __u32 tmp; 862 __u32 tmp;
818 len = hex32_arg(&buffer[i], &tmp); 863 len = hex32_arg(&buffer[i], 8, &tmp);
819 if (len <= 0) 864 if (len <= 0)
820 return len; 865 return len;
821 pkt_dev->labels[n] = htonl(tmp); 866 pkt_dev->labels[n] = htonl(tmp);
@@ -1140,11 +1185,27 @@ static ssize_t pktgen_if_write(struct file *file,
1140 else if (strcmp(f, "!MPLS_RND") == 0) 1185 else if (strcmp(f, "!MPLS_RND") == 0)
1141 pkt_dev->flags &= ~F_MPLS_RND; 1186 pkt_dev->flags &= ~F_MPLS_RND;
1142 1187
1188 else if (strcmp(f, "VID_RND") == 0)
1189 pkt_dev->flags |= F_VID_RND;
1190
1191 else if (strcmp(f, "!VID_RND") == 0)
1192 pkt_dev->flags &= ~F_VID_RND;
1193
1194 else if (strcmp(f, "SVID_RND") == 0)
1195 pkt_dev->flags |= F_SVID_RND;
1196
1197 else if (strcmp(f, "!SVID_RND") == 0)
1198 pkt_dev->flags &= ~F_SVID_RND;
1199
1200 else if (strcmp(f, "!IPV6") == 0)
1201 pkt_dev->flags &= ~F_IPV6;
1202
1143 else { 1203 else {
1144 sprintf(pg_result, 1204 sprintf(pg_result,
1145 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1205 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1146 f, 1206 f,
1147 "IPSRC_RND, IPDST_RND, TXSIZE_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n"); 1207 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1208 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND\n");
1148 return count; 1209 return count;
1149 } 1210 }
1150 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1211 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -1445,6 +1506,160 @@ static ssize_t pktgen_if_write(struct file *file,
1445 offset += sprintf(pg_result + offset, 1506 offset += sprintf(pg_result + offset,
1446 "%08x%s", ntohl(pkt_dev->labels[n]), 1507 "%08x%s", ntohl(pkt_dev->labels[n]),
1447 n == pkt_dev->nr_labels-1 ? "" : ","); 1508 n == pkt_dev->nr_labels-1 ? "" : ",");
1509
1510 if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
1511 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1512 pkt_dev->svlan_id = 0xffff;
1513
1514 if (debug)
1515 printk("pktgen: VLAN/SVLAN auto turned off\n");
1516 }
1517 return count;
1518 }
1519
1520 if (!strcmp(name, "vlan_id")) {
1521 len = num_arg(&user_buffer[i], 4, &value);
1522 if (len < 0) {
1523 return len;
1524 }
1525 i += len;
1526 if (value <= 4095) {
1527 pkt_dev->vlan_id = value; /* turn on VLAN */
1528
1529 if (debug)
1530 printk("pktgen: VLAN turned on\n");
1531
1532 if (debug && pkt_dev->nr_labels)
1533 printk("pktgen: MPLS auto turned off\n");
1534
1535 pkt_dev->nr_labels = 0; /* turn off MPLS */
1536 sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
1537 } else {
1538 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1539 pkt_dev->svlan_id = 0xffff;
1540
1541 if (debug)
1542 printk("pktgen: VLAN/SVLAN turned off\n");
1543 }
1544 return count;
1545 }
1546
1547 if (!strcmp(name, "vlan_p")) {
1548 len = num_arg(&user_buffer[i], 1, &value);
1549 if (len < 0) {
1550 return len;
1551 }
1552 i += len;
1553 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
1554 pkt_dev->vlan_p = value;
1555 sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
1556 } else {
1557 sprintf(pg_result, "ERROR: vlan_p must be 0-7");
1558 }
1559 return count;
1560 }
1561
1562 if (!strcmp(name, "vlan_cfi")) {
1563 len = num_arg(&user_buffer[i], 1, &value);
1564 if (len < 0) {
1565 return len;
1566 }
1567 i += len;
1568 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
1569 pkt_dev->vlan_cfi = value;
1570 sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
1571 } else {
1572 sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
1573 }
1574 return count;
1575 }
1576
1577 if (!strcmp(name, "svlan_id")) {
1578 len = num_arg(&user_buffer[i], 4, &value);
1579 if (len < 0) {
1580 return len;
1581 }
1582 i += len;
1583 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
1584 pkt_dev->svlan_id = value; /* turn on SVLAN */
1585
1586 if (debug)
1587 printk("pktgen: SVLAN turned on\n");
1588
1589 if (debug && pkt_dev->nr_labels)
1590 printk("pktgen: MPLS auto turned off\n");
1591
1592 pkt_dev->nr_labels = 0; /* turn off MPLS */
1593 sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
1594 } else {
1595 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
1596 pkt_dev->svlan_id = 0xffff;
1597
1598 if (debug)
1599 printk("pktgen: VLAN/SVLAN turned off\n");
1600 }
1601 return count;
1602 }
1603
1604 if (!strcmp(name, "svlan_p")) {
1605 len = num_arg(&user_buffer[i], 1, &value);
1606 if (len < 0) {
1607 return len;
1608 }
1609 i += len;
1610 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
1611 pkt_dev->svlan_p = value;
1612 sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
1613 } else {
1614 sprintf(pg_result, "ERROR: svlan_p must be 0-7");
1615 }
1616 return count;
1617 }
1618
1619 if (!strcmp(name, "svlan_cfi")) {
1620 len = num_arg(&user_buffer[i], 1, &value);
1621 if (len < 0) {
1622 return len;
1623 }
1624 i += len;
1625 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
1626 pkt_dev->svlan_cfi = value;
1627 sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
1628 } else {
1629 sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
1630 }
1631 return count;
1632 }
1633
1634 if (!strcmp(name, "tos")) {
1635 __u32 tmp_value = 0;
1636 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1637 if (len < 0) {
1638 return len;
1639 }
1640 i += len;
1641 if (len == 2) {
1642 pkt_dev->tos = tmp_value;
1643 sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
1644 } else {
1645 sprintf(pg_result, "ERROR: tos must be 00-ff");
1646 }
1647 return count;
1648 }
1649
1650 if (!strcmp(name, "traffic_class")) {
1651 __u32 tmp_value = 0;
1652 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1653 if (len < 0) {
1654 return len;
1655 }
1656 i += len;
1657 if (len == 2) {
1658 pkt_dev->traffic_class = tmp_value;
1659 sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
1660 } else {
1661 sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
1662 }
1448 return count; 1663 return count;
1449 } 1664 }
1450 1665
@@ -1949,6 +2164,14 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
1949 htonl(0x000fffff)); 2164 htonl(0x000fffff));
1950 } 2165 }
1951 2166
2167 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
2168 pkt_dev->vlan_id = pktgen_random() % 4096;
2169 }
2170
2171 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
2172 pkt_dev->svlan_id = pktgen_random() % 4096;
2173 }
2174
1952 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2175 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
1953 if (pkt_dev->flags & F_UDPSRC_RND) 2176 if (pkt_dev->flags & F_UDPSRC_RND)
1954 pkt_dev->cur_udp_src = 2177 pkt_dev->cur_udp_src =
@@ -2092,10 +2315,18 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2092 struct pktgen_hdr *pgh = NULL; 2315 struct pktgen_hdr *pgh = NULL;
2093 __be16 protocol = __constant_htons(ETH_P_IP); 2316 __be16 protocol = __constant_htons(ETH_P_IP);
2094 __be32 *mpls; 2317 __be32 *mpls;
2318 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
2319 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */
2320 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */
2321 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2322
2095 2323
2096 if (pkt_dev->nr_labels) 2324 if (pkt_dev->nr_labels)
2097 protocol = __constant_htons(ETH_P_MPLS_UC); 2325 protocol = __constant_htons(ETH_P_MPLS_UC);
2098 2326
2327 if (pkt_dev->vlan_id != 0xffff)
2328 protocol = __constant_htons(ETH_P_8021Q);
2329
2099 /* Update any of the values, used when we're incrementing various 2330 /* Update any of the values, used when we're incrementing various
2100 * fields. 2331 * fields.
2101 */ 2332 */
@@ -2103,7 +2334,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2103 2334
2104 datalen = (odev->hard_header_len + 16) & ~0xf; 2335 datalen = (odev->hard_header_len + 16) & ~0xf;
2105 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2336 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen +
2106 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2337 pkt_dev->nr_labels*sizeof(u32) +
2338 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2339 GFP_ATOMIC);
2107 if (!skb) { 2340 if (!skb) {
2108 sprintf(pkt_dev->result, "No memory"); 2341 sprintf(pkt_dev->result, "No memory");
2109 return NULL; 2342 return NULL;
@@ -2116,6 +2349,24 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2116 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2349 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
2117 if (pkt_dev->nr_labels) 2350 if (pkt_dev->nr_labels)
2118 mpls_push(mpls, pkt_dev); 2351 mpls_push(mpls, pkt_dev);
2352
2353 if (pkt_dev->vlan_id != 0xffff) {
2354 if(pkt_dev->svlan_id != 0xffff) {
2355 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
2356 *svlan_tci = htons(pkt_dev->svlan_id);
2357 *svlan_tci |= pkt_dev->svlan_p << 5;
2358 *svlan_tci |= pkt_dev->svlan_cfi << 4;
2359 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
2360 *svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
2361 }
2362 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
2363 *vlan_tci = htons(pkt_dev->vlan_id);
2364 *vlan_tci |= pkt_dev->vlan_p << 5;
2365 *vlan_tci |= pkt_dev->vlan_cfi << 4;
2366 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
2367 *vlan_encapsulated_proto = __constant_htons(ETH_P_IP);
2368 }
2369
2119 iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); 2370 iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
2120 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2371 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
2121 2372
@@ -2124,7 +2375,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2124 2375
2125 /* Eth + IPh + UDPh + mpls */ 2376 /* Eth + IPh + UDPh + mpls */
2126 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2377 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
2127 pkt_dev->nr_labels*sizeof(u32); 2378 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
2128 if (datalen < sizeof(struct pktgen_hdr)) 2379 if (datalen < sizeof(struct pktgen_hdr))
2129 datalen = sizeof(struct pktgen_hdr); 2380 datalen = sizeof(struct pktgen_hdr);
2130 2381
@@ -2136,7 +2387,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2136 iph->ihl = 5; 2387 iph->ihl = 5;
2137 iph->version = 4; 2388 iph->version = 4;
2138 iph->ttl = 32; 2389 iph->ttl = 32;
2139 iph->tos = 0; 2390 iph->tos = pkt_dev->tos;
2140 iph->protocol = IPPROTO_UDP; /* UDP */ 2391 iph->protocol = IPPROTO_UDP; /* UDP */
2141 iph->saddr = pkt_dev->cur_saddr; 2392 iph->saddr = pkt_dev->cur_saddr;
2142 iph->daddr = pkt_dev->cur_daddr; 2393 iph->daddr = pkt_dev->cur_daddr;
@@ -2146,7 +2397,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2146 iph->check = 0; 2397 iph->check = 0;
2147 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2398 iph->check = ip_fast_csum((void *)iph, iph->ihl);
2148 skb->protocol = protocol; 2399 skb->protocol = protocol;
2149 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2400 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32) -
2401 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
2150 skb->dev = odev; 2402 skb->dev = odev;
2151 skb->pkt_type = PACKET_HOST; 2403 skb->pkt_type = PACKET_HOST;
2152 skb->nh.iph = iph; 2404 skb->nh.iph = iph;
@@ -2218,7 +2470,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2218 pgh->tv_sec = htonl(timestamp.tv_sec); 2470 pgh->tv_sec = htonl(timestamp.tv_sec);
2219 pgh->tv_usec = htonl(timestamp.tv_usec); 2471 pgh->tv_usec = htonl(timestamp.tv_usec);
2220 } 2472 }
2221 pkt_dev->seq_num++;
2222 2473
2223 return skb; 2474 return skb;
2224} 2475}
@@ -2402,17 +2653,26 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2402 struct pktgen_hdr *pgh = NULL; 2653 struct pktgen_hdr *pgh = NULL;
2403 __be16 protocol = __constant_htons(ETH_P_IPV6); 2654 __be16 protocol = __constant_htons(ETH_P_IPV6);
2404 __be32 *mpls; 2655 __be32 *mpls;
2656 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
2657 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */
2658 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */
2659 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2405 2660
2406 if (pkt_dev->nr_labels) 2661 if (pkt_dev->nr_labels)
2407 protocol = __constant_htons(ETH_P_MPLS_UC); 2662 protocol = __constant_htons(ETH_P_MPLS_UC);
2408 2663
2664 if (pkt_dev->vlan_id != 0xffff)
2665 protocol = __constant_htons(ETH_P_8021Q);
2666
2409 /* Update any of the values, used when we're incrementing various 2667 /* Update any of the values, used when we're incrementing various
2410 * fields. 2668 * fields.
2411 */ 2669 */
2412 mod_cur_headers(pkt_dev); 2670 mod_cur_headers(pkt_dev);
2413 2671
2414 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2672 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
2415 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2673 pkt_dev->nr_labels*sizeof(u32) +
2674 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2675 GFP_ATOMIC);
2416 if (!skb) { 2676 if (!skb) {
2417 sprintf(pkt_dev->result, "No memory"); 2677 sprintf(pkt_dev->result, "No memory");
2418 return NULL; 2678 return NULL;
@@ -2425,16 +2685,34 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2425 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2685 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
2426 if (pkt_dev->nr_labels) 2686 if (pkt_dev->nr_labels)
2427 mpls_push(mpls, pkt_dev); 2687 mpls_push(mpls, pkt_dev);
2688
2689 if (pkt_dev->vlan_id != 0xffff) {
2690 if(pkt_dev->svlan_id != 0xffff) {
2691 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
2692 *svlan_tci = htons(pkt_dev->svlan_id);
2693 *svlan_tci |= pkt_dev->svlan_p << 5;
2694 *svlan_tci |= pkt_dev->svlan_cfi << 4;
2695 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
2696 *svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
2697 }
2698 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
2699 *vlan_tci = htons(pkt_dev->vlan_id);
2700 *vlan_tci |= pkt_dev->vlan_p << 5;
2701 *vlan_tci |= pkt_dev->vlan_cfi << 4;
2702 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
2703 *vlan_encapsulated_proto = __constant_htons(ETH_P_IPV6);
2704 }
2705
2428 iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr)); 2706 iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr));
2429 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2707 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
2430 2708
2431 memcpy(eth, pkt_dev->hh, 12); 2709 memcpy(eth, pkt_dev->hh, 12);
2432 *(u16 *) & eth[12] = __constant_htons(ETH_P_IPV6); 2710 *(u16 *) & eth[12] = protocol;
2433 2711
2434 /* Eth + IPh + UDPh + mpls */ 2712 /* Eth + IPh + UDPh + mpls */
2435 datalen = pkt_dev->cur_pkt_size - 14 - 2713 datalen = pkt_dev->cur_pkt_size - 14 -
2436 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2714 sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
2437 pkt_dev->nr_labels*sizeof(u32); 2715 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
2438 2716
2439 if (datalen < sizeof(struct pktgen_hdr)) { 2717 if (datalen < sizeof(struct pktgen_hdr)) {
2440 datalen = sizeof(struct pktgen_hdr); 2718 datalen = sizeof(struct pktgen_hdr);
@@ -2450,6 +2728,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2450 2728
2451 *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ 2729 *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */
2452 2730
2731 if (pkt_dev->traffic_class) {
2732 /* Version + traffic class + flow (0) */
2733 *(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
2734 }
2735
2453 iph->hop_limit = 32; 2736 iph->hop_limit = 32;
2454 2737
2455 iph->payload_len = htons(sizeof(struct udphdr) + datalen); 2738 iph->payload_len = htons(sizeof(struct udphdr) + datalen);
@@ -2458,7 +2741,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2458 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); 2741 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
2459 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2742 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
2460 2743
2461 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2744 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32) -
2745 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
2462 skb->protocol = protocol; 2746 skb->protocol = protocol;
2463 skb->dev = odev; 2747 skb->dev = odev;
2464 skb->pkt_type = PACKET_HOST; 2748 skb->pkt_type = PACKET_HOST;
@@ -2531,7 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2531 pgh->tv_sec = htonl(timestamp.tv_sec); 2815 pgh->tv_sec = htonl(timestamp.tv_sec);
2532 pgh->tv_usec = htonl(timestamp.tv_usec); 2816 pgh->tv_usec = htonl(timestamp.tv_usec);
2533 } 2817 }
2534 pkt_dev->seq_num++; 2818 /* pkt_dev->seq_num++; FF: you really mean this? */
2535 2819
2536 return skb; 2820 return skb;
2537} 2821}
@@ -3177,6 +3461,13 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3177 pkt_dev->udp_dst_min = 9; 3461 pkt_dev->udp_dst_min = 9;
3178 pkt_dev->udp_dst_max = 9; 3462 pkt_dev->udp_dst_max = 9;
3179 3463
3464 pkt_dev->vlan_p = 0;
3465 pkt_dev->vlan_cfi = 0;
3466 pkt_dev->vlan_id = 0xffff;
3467 pkt_dev->svlan_p = 0;
3468 pkt_dev->svlan_cfi = 0;
3469 pkt_dev->svlan_id = 0xffff;
3470
3180 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); 3471 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
3181 3472
3182 if (!pktgen_setup_dev(pkt_dev)) { 3473 if (!pktgen_setup_dev(pkt_dev)) {
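Both fill_packet_ipv4() and fill_packet_ipv6() splice the optional 802.1Q/Q-in-Q tags between the Ethernet header and the L3 header: 0x8100 as the outer EtherType, a 16-bit tag control field, then the encapsulated protocol. As a reference, a host-order composition of that tag control field in the standard layout (3-bit priority, 1-bit CFI, 12-bit VLAN ID); the sketch is stand-alone and does not reuse pktgen's own byte-order trick:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q 0x8100
#define ETH_P_IP    0x0800

int main(void)
{
    unsigned prio = 5, cfi = 0, vid = 100;

    uint16_t tci = (uint16_t)((prio << 13) | (cfi << 12) | (vid & 0x0fff));

    /* what goes on the wire after the source and destination MACs */
    uint16_t tag[3] = { htons(ETH_P_8021Q), htons(tci), htons(ETH_P_IP) };

    printf("tag: %04x %04x %04x\n", ntohs(tag[0]), ntohs(tag[1]), ntohs(tag[2]));
    return 0;
}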
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8e25e08cb7e..02f3c7947898 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -562,7 +562,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
562 562
563 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); 563 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
564 if (err < 0) 564 if (err < 0)
565 goto errout; 565 return err;
566 566
567 ifm = nlmsg_data(nlh); 567 ifm = nlmsg_data(nlh);
568 if (ifm->ifi_index >= 0) { 568 if (ifm->ifi_index >= 0) {
@@ -602,7 +602,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
602 goto errout; 602 goto errout;
603 } 603 }
604 604
605 err = rtnl_unicast(skb, NETLINK_CB(skb).pid); 605 err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
606errout: 606errout:
607 kfree(iw_buf); 607 kfree(iw_buf);
608 dev_put(dev); 608 dev_put(dev);
diff --git a/net/core/scm.c b/net/core/scm.c
index 649d01ef35b6..271cf060ef8c 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -245,8 +245,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
245 if (i > 0) 245 if (i > 0)
246 { 246 {
247 int cmlen = CMSG_LEN(i*sizeof(int)); 247 int cmlen = CMSG_LEN(i*sizeof(int));
248 if (!err) 248 err = put_user(SOL_SOCKET, &cm->cmsg_level);
249 err = put_user(SOL_SOCKET, &cm->cmsg_level);
250 if (!err) 249 if (!err)
251 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 250 err = put_user(SCM_RIGHTS, &cm->cmsg_type);
252 if (!err) 251 if (!err)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c448c7f6fde2..3c23760c5827 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -156,7 +156,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
156 156
157 /* Get the DATA. Size must match skb_add_mtu(). */ 157 /* Get the DATA. Size must match skb_add_mtu(). */
158 size = SKB_DATA_ALIGN(size); 158 size = SKB_DATA_ALIGN(size);
159 data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 159 data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
160 gfp_mask);
160 if (!data) 161 if (!data)
161 goto nodata; 162 goto nodata;
162 163
diff --git a/net/core/sock.c b/net/core/sock.c
index b77e155cbe6c..d472db4776c3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -823,7 +823,7 @@ static void inline sock_lock_init(struct sock *sk)
823 af_family_slock_key_strings[sk->sk_family]); 823 af_family_slock_key_strings[sk->sk_family]);
824 lockdep_init_map(&sk->sk_lock.dep_map, 824 lockdep_init_map(&sk->sk_lock.dep_map,
825 af_family_key_strings[sk->sk_family], 825 af_family_key_strings[sk->sk_family],
826 af_family_keys + sk->sk_family); 826 af_family_keys + sk->sk_family, 0);
827} 827}
828 828
829/** 829/**
diff --git a/net/core/utils.c b/net/core/utils.c
index 2682490777de..d93fe64f6693 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Authors: 4 * Authors:
5 * net_random Alan Cox 5 * net_random Alan Cox
6 * net_ratelimit Andy Kleen 6 * net_ratelimit Andi Kleen
7 * in{4,6}_pton YOSHIFUJI Hideaki, Copyright (C)2006 USAGI/WIDE Project 7 * in{4,6}_pton YOSHIFUJI Hideaki, Copyright (C)2006 USAGI/WIDE Project
8 * 8 *
9 * Created by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 9 * Created by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
@@ -30,119 +30,6 @@
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32 32
33/*
34 This is a maximally equidistributed combined Tausworthe generator
35 based on code from GNU Scientific Library 1.5 (30 Jun 2004)
36
37 x_n = (s1_n ^ s2_n ^ s3_n)
38
39 s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
40 s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
41 s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
42
43 The period of this generator is about 2^88.
44
45 From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
46 Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
47
48 This is available on the net from L'Ecuyer's home page,
49
50 http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
51 ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
52
53 There is an erratum in the paper "Tables of Maximally
54 Equidistributed Combined LFSR Generators", Mathematics of
55 Computation, 68, 225 (1999), 261--269:
56 http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
57
58 ... the k_j most significant bits of z_j must be non-
59 zero, for each j. (Note: this restriction also applies to the
60 computer code given in [4], but was mistakenly not mentioned in
61 that paper.)
62
63 This affects the seeding procedure by imposing the requirement
64 s1 > 1, s2 > 7, s3 > 15.
65
66*/
67struct nrnd_state {
68 u32 s1, s2, s3;
69};
70
71static DEFINE_PER_CPU(struct nrnd_state, net_rand_state);
72
73static u32 __net_random(struct nrnd_state *state)
74{
75#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
76
77 state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
78 state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
79 state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
80
81 return (state->s1 ^ state->s2 ^ state->s3);
82}
83
84static void __net_srandom(struct nrnd_state *state, unsigned long s)
85{
86 if (s == 0)
87 s = 1; /* default seed is 1 */
88
89#define LCG(n) (69069 * n)
90 state->s1 = LCG(s);
91 state->s2 = LCG(state->s1);
92 state->s3 = LCG(state->s2);
93
94 /* "warm it up" */
95 __net_random(state);
96 __net_random(state);
97 __net_random(state);
98 __net_random(state);
99 __net_random(state);
100 __net_random(state);
101}
102
103
104unsigned long net_random(void)
105{
106 unsigned long r;
107 struct nrnd_state *state = &get_cpu_var(net_rand_state);
108 r = __net_random(state);
109 put_cpu_var(state);
110 return r;
111}
112
113
114void net_srandom(unsigned long entropy)
115{
116 struct nrnd_state *state = &get_cpu_var(net_rand_state);
117 __net_srandom(state, state->s1^entropy);
118 put_cpu_var(state);
119}
120
121void __init net_random_init(void)
122{
123 int i;
124
125 for_each_possible_cpu(i) {
126 struct nrnd_state *state = &per_cpu(net_rand_state,i);
127 __net_srandom(state, i+jiffies);
128 }
129}
130
131static int net_random_reseed(void)
132{
133 int i;
134 unsigned long seed;
135
136 for_each_possible_cpu(i) {
137 struct nrnd_state *state = &per_cpu(net_rand_state,i);
138
139 get_random_bytes(&seed, sizeof(seed));
140 __net_srandom(state, seed);
141 }
142 return 0;
143}
144late_initcall(net_random_reseed);
145
146int net_msg_cost = 5*HZ; 33int net_msg_cost = 5*HZ;
147int net_msg_burst = 10; 34int net_msg_burst = 10;
148 35
@@ -153,10 +40,7 @@ int net_ratelimit(void)
153{ 40{
154 return __printk_ratelimit(net_msg_cost, net_msg_burst); 41 return __printk_ratelimit(net_msg_cost, net_msg_burst);
155} 42}
156
157EXPORT_SYMBOL(net_random);
158EXPORT_SYMBOL(net_ratelimit); 43EXPORT_SYMBOL(net_ratelimit);
159EXPORT_SYMBOL(net_srandom);
160 44
161/* 45/*
162 * Convert an ASCII string to binary IP. 46 * Convert an ASCII string to binary IP.
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 3168fca312f7..ffff0da46c6e 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -68,6 +68,14 @@
68 * 68 *
69 * v8 - 17.02.06 - Jean II 69 * v8 - 17.02.06 - Jean II
70 * o RtNetlink requests support (SET/GET) 70 * o RtNetlink requests support (SET/GET)
71 *
72 * v8b - 03.08.06 - Herbert Xu
73 * o Fix Wireless Event locking issues.
74 *
75 * v9 - 14.3.06 - Jean II
76 * o Change length in ESSID and NICK to strlen() instead of strlen()+1
77 * o Make standard_ioctl_num and standard_event_num unsigned
78 * o Remove (struct net_device *)->get_wireless_stats()
71 */ 79 */
72 80
73/***************************** INCLUDES *****************************/ 81/***************************** INCLUDES *****************************/
@@ -234,24 +242,24 @@ static const struct iw_ioctl_description standard_ioctl[] = {
234 [SIOCSIWESSID - SIOCIWFIRST] = { 242 [SIOCSIWESSID - SIOCIWFIRST] = {
235 .header_type = IW_HEADER_TYPE_POINT, 243 .header_type = IW_HEADER_TYPE_POINT,
236 .token_size = 1, 244 .token_size = 1,
237 .max_tokens = IW_ESSID_MAX_SIZE + 1, 245 .max_tokens = IW_ESSID_MAX_SIZE,
238 .flags = IW_DESCR_FLAG_EVENT, 246 .flags = IW_DESCR_FLAG_EVENT,
239 }, 247 },
240 [SIOCGIWESSID - SIOCIWFIRST] = { 248 [SIOCGIWESSID - SIOCIWFIRST] = {
241 .header_type = IW_HEADER_TYPE_POINT, 249 .header_type = IW_HEADER_TYPE_POINT,
242 .token_size = 1, 250 .token_size = 1,
243 .max_tokens = IW_ESSID_MAX_SIZE + 1, 251 .max_tokens = IW_ESSID_MAX_SIZE,
244 .flags = IW_DESCR_FLAG_DUMP, 252 .flags = IW_DESCR_FLAG_DUMP,
245 }, 253 },
246 [SIOCSIWNICKN - SIOCIWFIRST] = { 254 [SIOCSIWNICKN - SIOCIWFIRST] = {
247 .header_type = IW_HEADER_TYPE_POINT, 255 .header_type = IW_HEADER_TYPE_POINT,
248 .token_size = 1, 256 .token_size = 1,
249 .max_tokens = IW_ESSID_MAX_SIZE + 1, 257 .max_tokens = IW_ESSID_MAX_SIZE,
250 }, 258 },
251 [SIOCGIWNICKN - SIOCIWFIRST] = { 259 [SIOCGIWNICKN - SIOCIWFIRST] = {
252 .header_type = IW_HEADER_TYPE_POINT, 260 .header_type = IW_HEADER_TYPE_POINT,
253 .token_size = 1, 261 .token_size = 1,
254 .max_tokens = IW_ESSID_MAX_SIZE + 1, 262 .max_tokens = IW_ESSID_MAX_SIZE,
255 }, 263 },
256 [SIOCSIWRATE - SIOCIWFIRST] = { 264 [SIOCSIWRATE - SIOCIWFIRST] = {
257 .header_type = IW_HEADER_TYPE_PARAM, 265 .header_type = IW_HEADER_TYPE_PARAM,
@@ -338,8 +346,8 @@ static const struct iw_ioctl_description standard_ioctl[] = {
338 .max_tokens = sizeof(struct iw_pmksa), 346 .max_tokens = sizeof(struct iw_pmksa),
339 }, 347 },
340}; 348};
341static const int standard_ioctl_num = (sizeof(standard_ioctl) / 349static const unsigned standard_ioctl_num = (sizeof(standard_ioctl) /
342 sizeof(struct iw_ioctl_description)); 350 sizeof(struct iw_ioctl_description));
343 351
344/* 352/*
345 * Meta-data about all the additional standard Wireless Extension events 353 * Meta-data about all the additional standard Wireless Extension events
@@ -389,8 +397,8 @@ static const struct iw_ioctl_description standard_event[] = {
389 .max_tokens = sizeof(struct iw_pmkid_cand), 397 .max_tokens = sizeof(struct iw_pmkid_cand),
390 }, 398 },
391}; 399};
392static const int standard_event_num = (sizeof(standard_event) / 400static const unsigned standard_event_num = (sizeof(standard_event) /
393 sizeof(struct iw_ioctl_description)); 401 sizeof(struct iw_ioctl_description));
394 402
395/* Size (in bytes) of the various private data types */ 403/* Size (in bytes) of the various private data types */
396static const char iw_priv_type_size[] = { 404static const char iw_priv_type_size[] = {
@@ -465,17 +473,6 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
465 (dev->wireless_handlers->get_wireless_stats != NULL)) 473 (dev->wireless_handlers->get_wireless_stats != NULL))
466 return dev->wireless_handlers->get_wireless_stats(dev); 474 return dev->wireless_handlers->get_wireless_stats(dev);
467 475
468 /* Old location, field to be removed in next WE */
469 if(dev->get_wireless_stats) {
470 static int printed_message;
471
472 if (!printed_message++)
473 printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n",
474 dev->name);
475
476 return dev->get_wireless_stats(dev);
477 }
478
479 /* Not found */ 476 /* Not found */
480 return (struct iw_statistics *) NULL; 477 return (struct iw_statistics *) NULL;
481} 478}
@@ -1843,8 +1840,33 @@ int wireless_rtnetlink_set(struct net_device * dev,
1843 */ 1840 */
1844 1841
1845#ifdef WE_EVENT_RTNETLINK 1842#ifdef WE_EVENT_RTNETLINK
1843/* ---------------------------------------------------------------- */
1844/*
1845 * Locking...
1846 * ----------
1847 *
1848 * Thanks to Herbert Xu <herbert@gondor.apana.org.au> for fixing
1849 * the locking issue in here and implementing this code !
1850 *
1851 * The issue : wireless_send_event() is often called in interrupt context,
1852 * while the Netlink layer can never be called in interrupt context.
1853 * The fully formed RtNetlink events are queued, and then a tasklet is run
1854 * to feed those to Netlink.
1855 * The skb_queue is interrupt safe, and its lock is not held while calling
1856 * Netlink, so there is no possibility of deadlock.
1857 * Jean II
1858 */
1859
1846static struct sk_buff_head wireless_nlevent_queue; 1860static struct sk_buff_head wireless_nlevent_queue;
1847 1861
1862static int __init wireless_nlevent_init(void)
1863{
1864 skb_queue_head_init(&wireless_nlevent_queue);
1865 return 0;
1866}
1867
1868subsys_initcall(wireless_nlevent_init);
1869
1848static void wireless_nlevent_process(unsigned long data) 1870static void wireless_nlevent_process(unsigned long data)
1849{ 1871{
1850 struct sk_buff *skb; 1872 struct sk_buff *skb;
@@ -1921,13 +1943,6 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
1921 tasklet_schedule(&wireless_nlevent_tasklet); 1943 tasklet_schedule(&wireless_nlevent_tasklet);
1922} 1944}
1923 1945
1924static int __init wireless_nlevent_init(void)
1925{
1926 skb_queue_head_init(&wireless_nlevent_queue);
1927 return 0;
1928}
1929
1930subsys_initcall(wireless_nlevent_init);
1931#endif /* WE_EVENT_RTNETLINK */ 1946#endif /* WE_EVENT_RTNETLINK */
1932 1947
1933/* ---------------------------------------------------------------- */ 1948/* ---------------------------------------------------------------- */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 66be29b6f508..7e746c4c1688 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -50,7 +50,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
50 struct dccp_sock *dp = dccp_sk(sk); 50 struct dccp_sock *dp = dccp_sk(sk);
51 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; 51 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
52 struct rtable *rt; 52 struct rtable *rt;
53 u32 daddr, nexthop; 53 __be32 daddr, nexthop;
54 int tmp; 54 int tmp;
55 int err; 55 int err;
56 56
@@ -311,7 +311,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
311 } 311 }
312 312
313 if (sk->sk_state == DCCP_TIME_WAIT) { 313 if (sk->sk_state == DCCP_TIME_WAIT) {
314 inet_twsk_put((struct inet_timewait_sock *)sk); 314 inet_twsk_put(inet_twsk(sk));
315 return; 315 return;
316 } 316 }
317 317
@@ -614,7 +614,7 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
614 bh_lock_sock(nsk); 614 bh_lock_sock(nsk);
615 return nsk; 615 return nsk;
616 } 616 }
617 inet_twsk_put((struct inet_timewait_sock *)nsk); 617 inet_twsk_put(inet_twsk(nsk));
618 return NULL; 618 return NULL;
619 } 619 }
620 620
@@ -980,7 +980,7 @@ discard_and_relse:
980 goto discard_it; 980 goto discard_it;
981 981
982do_time_wait: 982do_time_wait:
983 inet_twsk_put((struct inet_timewait_sock *)sk); 983 inet_twsk_put(inet_twsk(sk));
984 goto no_dccp_socket; 984 goto no_dccp_socket;
985} 985}
986 986
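The dccp changes replace open-coded casts of a struct sock pointer with the inet_twsk() accessor. A sketch of why such a helper is preferable to scattering raw casts: the conversion is documented and lives in one place. The structures below are simplified stand-ins, not the kernel definitions:

#include <stdio.h>

struct sock { int state; };
struct inet_timewait_sock { struct sock sk; int tw_timeout; };  /* sock placed first for the cast */

static inline struct inet_timewait_sock *inet_twsk(struct sock *sk)
{
    return (struct inet_timewait_sock *)sk;     /* valid because sk is the first member */
}

int main(void)
{
    struct inet_timewait_sock tw = { { 7 }, 60 };
    struct sock *sk = &tw.sk;                   /* what the caller usually holds */

    printf("timeout %d\n", inet_twsk(sk)->tw_timeout);
    return 0;
}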
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 7a47399cf31f..7171a78671aa 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -285,7 +285,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
285 } 285 }
286 286
287 if (sk->sk_state == DCCP_TIME_WAIT) { 287 if (sk->sk_state == DCCP_TIME_WAIT) {
288 inet_twsk_put((struct inet_timewait_sock *)sk); 288 inet_twsk_put(inet_twsk(sk));
289 return; 289 return;
290 } 290 }
291 291
@@ -663,7 +663,7 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
663 bh_lock_sock(nsk); 663 bh_lock_sock(nsk);
664 return nsk; 664 return nsk;
665 } 665 }
666 inet_twsk_put((struct inet_timewait_sock *)nsk); 666 inet_twsk_put(inet_twsk(nsk));
667 return NULL; 667 return NULL;
668 } 668 }
669 669
@@ -1109,7 +1109,7 @@ discard_and_relse:
1109 goto discard_it; 1109 goto discard_it;
1110 1110
1111do_time_wait: 1111do_time_wait:
1112 inet_twsk_put((struct inet_timewait_sock *)sk); 1112 inet_twsk_put(inet_twsk(sk));
1113 goto no_dccp_socket; 1113 goto no_dccp_socket;
1114} 1114}
1115 1115
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 70e027375682..3456cd331835 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1178,8 +1178,10 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
1178 if (peer) { 1178 if (peer) {
1179 if ((sock->state != SS_CONNECTED && 1179 if ((sock->state != SS_CONNECTED &&
1180 sock->state != SS_CONNECTING) && 1180 sock->state != SS_CONNECTING) &&
1181 scp->accept_mode == ACC_IMMED) 1181 scp->accept_mode == ACC_IMMED) {
1182 release_sock(sk);
1182 return -ENOTCONN; 1183 return -ENOTCONN;
1184 }
1183 1185
1184 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); 1186 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1185 } else { 1187 } else {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index dd0761e3d280..23489f7232d2 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -267,9 +267,14 @@ static void dn_dst_link_failure(struct sk_buff *skb)
267 267
268static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 268static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
269{ 269{
270 return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 && 270 return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
271 fl1->oif == fl2->oif && 271 (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
272 fl1->iif == fl2->iif; 272#ifdef CONFIG_DECNET_ROUTE_FWMARK
273 (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) |
274#endif
275 (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
276 (fl1->oif ^ fl2->oif) |
277 (fl1->iif ^ fl2->iif)) == 0;
273} 278}
274 279
275static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 280static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
@@ -1270,7 +1275,6 @@ static int dn_route_input_slow(struct sk_buff *skb)
1270 goto e_inval; 1275 goto e_inval;
1271 1276
1272 res.type = RTN_LOCAL; 1277 res.type = RTN_LOCAL;
1273 flags |= RTCF_DIRECTSRC;
1274 } else { 1278 } else {
1275 __le16 src_map = fl.fld_src; 1279 __le16 src_map = fl.fld_src;
1276 free_res = 1; 1280 free_res = 1;
@@ -1341,7 +1345,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1341 goto make_route; 1345 goto make_route;
1342 1346
1343 /* Packet was intra-ethernet, so we know its on-link */ 1347 /* Packet was intra-ethernet, so we know its on-link */
1344 if (cb->rt_flags | DN_RT_F_IE) { 1348 if (cb->rt_flags & DN_RT_F_IE) {
1345 gateway = cb->src; 1349 gateway = cb->src;
1346 flags |= RTCF_DIRECTSRC; 1350 flags |= RTCF_DIRECTSRC;
1347 goto make_route; 1351 goto make_route;
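compare_keys() is rewritten as one branch-free expression: XOR each pair of fields (zero only when they match) and OR the partial results, so the keys are equal exactly when the whole expression is zero. The same file also fixes the cb->rt_flags | DN_RT_F_IE test, which was always true, into a proper & bit test. A runnable demonstration of the XOR/OR comparison with made-up key fields:

#include <stdio.h>
#include <stdint.h>

struct key { uint16_t daddr, saddr; int oif, iif; };

static int keys_equal(const struct key *a, const struct key *b)
{
    /* each XOR is zero only when the fields match; OR-ing keeps any mismatch bit */
    return ((a->daddr ^ b->daddr) |
            (a->saddr ^ b->saddr) |
            (a->oif   ^ b->oif)   |
            (a->iif   ^ b->iif)) == 0;
}

int main(void)
{
    struct key a = { 0x0401, 0x0402, 2, 0 };
    struct key b = a, c = a;
    c.oif = 3;

    printf("a==b: %d, a==c: %d\n", keys_equal(&a, &b), keys_equal(&a, &c));
    return 0;
}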
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 43863933f27f..4bd78c8cfb26 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -223,7 +223,7 @@ static int eth_header_parse(struct sk_buff *skb, unsigned char *haddr)
223 */ 223 */
224int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh) 224int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh)
225{ 225{
226 unsigned short type = hh->hh_type; 226 __be16 type = hh->hh_type;
227 struct ethhdr *eth; 227 struct ethhdr *eth;
228 struct net_device *dev = neigh->dev; 228 struct net_device *dev = neigh->dev;
229 229
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 75320b6842ab..2aa779d18f38 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -80,10 +80,10 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
80 * If it's our network, ignore the change, we're already doing it! 80 * If it's our network, ignore the change, we're already doing it!
81 */ 81 */
82 if((sm->associnfo.associating || sm->associated) && 82 if((sm->associnfo.associating || sm->associated) &&
83 (data->essid.flags && data->essid.length && extra)) { 83 (data->essid.flags && data->essid.length)) {
84 /* Get the associating network */ 84 /* Get the associating network */
85 n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); 85 n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
86 if(n && n->essid.len == (data->essid.length - 1) && 86 if(n && n->essid.len == data->essid.length &&
87 !memcmp(n->essid.data, extra, n->essid.len)) { 87 !memcmp(n->essid.data, extra, n->essid.len)) {
88 dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n", 88 dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
89 MAC_ARG(sm->associnfo.bssid)); 89 MAC_ARG(sm->associnfo.bssid));
@@ -109,8 +109,8 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
109 sm->associnfo.static_essid = 0; 109 sm->associnfo.static_essid = 0;
110 sm->associnfo.assoc_wait = 0; 110 sm->associnfo.assoc_wait = 0;
111 111
112 if (data->essid.flags && data->essid.length && extra /*required?*/) { 112 if (data->essid.flags && data->essid.length) {
113 length = min(data->essid.length - 1, IW_ESSID_MAX_SIZE); 113 length = min((int)data->essid.length, IW_ESSID_MAX_SIZE);
114 if (length) { 114 if (length) {
115 memcpy(sm->associnfo.req_essid.data, extra, length); 115 memcpy(sm->associnfo.req_essid.data, extra, length);
116 sm->associnfo.static_essid = 1; 116 sm->associnfo.static_essid = 1;
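The softmac hunks track a wireless-extensions semantics change: essid.length no longer counts a trailing NUL in this tree, so the "- 1" adjustments go away, and the clamp casts the length to int so min() compares like types. A small standalone sketch of the clamp-and-copy pattern (buffer size and names are illustrative, not the softmac structures):

#include <stdio.h>
#include <string.h>

#define ESSID_MAX 32   /* corresponds to IW_ESSID_MAX_SIZE */

/* Copy a user-supplied ESSID, clamping the advertised length instead of
 * trusting it; no trailing NUL is assumed or copied. */
static int copy_essid(char *dst, const char *src, int reported_len)
{
    int len = reported_len;

    if (len < 0)
        len = 0;
    if (len > ESSID_MAX)
        len = ESSID_MAX;
    memcpy(dst, src, (size_t)len);
    return len;
}

int main(void)
{
    char buf[ESSID_MAX];
    printf("%d\n", copy_essid(buf, "homenet", 7));   /* 7 */
    return 0;
}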
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 30af4a4dfcc8..5572071af735 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -64,7 +64,7 @@ config ASK_IP_FIB_HASH
64config IP_FIB_TRIE 64config IP_FIB_TRIE
65 bool "FIB_TRIE" 65 bool "FIB_TRIE"
66 ---help--- 66 ---help---
67 Use new experimental LC-trie as FIB lookup algoritm. 67 Use new experimental LC-trie as FIB lookup algorithm.
68 This improves lookup performance if you have a large 68 This improves lookup performance if you have a large
69 number of routes. 69 number of routes.
70 70
@@ -434,6 +434,15 @@ config INET_XFRM_MODE_TUNNEL
434 434
435 If unsure, say Y. 435 If unsure, say Y.
436 436
437config INET_XFRM_MODE_BEET
438 tristate "IP: IPsec BEET mode"
439 default y
440 select XFRM
441 ---help---
442 Support for IPsec BEET mode.
443
444 If unsure, say Y.
445
437config INET_DIAG 446config INET_DIAG
438 tristate "INET: socket monitoring interface" 447 tristate "INET: socket monitoring interface"
439 default y 448 default y
@@ -526,7 +535,7 @@ config TCP_CONG_HYBLA
526 ---help--- 535 ---help---
527 TCP-Hybla is a sender-side only change that eliminates penalization of 536 TCP-Hybla is a sender-side only change that eliminates penalization of
528 long-RTT, large-bandwidth connections, like when satellite legs are 537 long-RTT, large-bandwidth connections, like when satellite legs are
529 involved, expecially when sharing a common bottleneck with normal 538 involved, especially when sharing a common bottleneck with normal
530 terrestrial connections. 539 terrestrial connections.
531 540
532config TCP_CONG_VEGAS 541config TCP_CONG_VEGAS
@@ -556,7 +565,7 @@ config TCP_CONG_LP
556 default n 565 default n
557 ---help--- 566 ---help---
558 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is 567 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
559 to utiliza only the excess network bandwidth as compared to the 568 to utilize only the excess network bandwidth as compared to the
560 ``fair share`` of bandwidth as targeted by TCP. 569 ``fair share`` of bandwidth as targeted by TCP.
561 See http://www-ece.rice.edu/networks/TCP-LP/ 570 See http://www-ece.rice.edu/networks/TCP-LP/
562 571
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f66049e28aeb..15645c51520c 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_INET_AH) += ah4.o
23obj-$(CONFIG_INET_ESP) += esp4.o 23obj-$(CONFIG_INET_ESP) += esp4.o
24obj-$(CONFIG_INET_IPCOMP) += ipcomp.o 24obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
25obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o 25obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o
26obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o
26obj-$(CONFIG_INET_TUNNEL) += tunnel4.o 27obj-$(CONFIG_INET_TUNNEL) += tunnel4.o
27obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o 28obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o
28obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o 29obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fdd89e37b9aa..edcf0932ac6d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -996,7 +996,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
996 struct rtable *rt; 996 struct rtable *rt;
997 __u32 old_saddr = inet->saddr; 997 __u32 old_saddr = inet->saddr;
998 __u32 new_saddr; 998 __u32 new_saddr;
999 __u32 daddr = inet->daddr; 999 __be32 daddr = inet->daddr;
1000 1000
1001 if (inet->opt && inet->opt->srr) 1001 if (inet->opt && inet->opt->srr)
1002 daddr = inet->opt->faddr; 1002 daddr = inet->opt->faddr;
@@ -1043,7 +1043,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1043{ 1043{
1044 struct inet_sock *inet = inet_sk(sk); 1044 struct inet_sock *inet = inet_sk(sk);
1045 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); 1045 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1046 u32 daddr; 1046 __be32 daddr;
1047 int err; 1047 int err;
1048 1048
1049 /* Route is OK, nothing to do. */ 1049 /* Route is OK, nothing to do. */
@@ -1342,10 +1342,10 @@ static int __init inet_init(void)
1342 rc = 0; 1342 rc = 0;
1343out: 1343out:
1344 return rc; 1344 return rc;
1345out_unregister_tcp_proto:
1346 proto_unregister(&tcp_prot);
1347out_unregister_udp_proto: 1345out_unregister_udp_proto:
1348 proto_unregister(&udp_prot); 1346 proto_unregister(&udp_prot);
1347out_unregister_tcp_proto:
1348 proto_unregister(&tcp_prot);
1349 goto out; 1349 goto out;
1350} 1350}
1351 1351
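Besides the __be32 annotations, the inet_init() hunk reorders the error labels. The surrounding function registers tcp_prot before udp_prot, so the cleanup labels must appear in the reverse order of the setup steps; with the old layout, a failure right after udp registration would fall through and unregister udp even though it was never registered. A self-contained sketch of the goto-unwind pattern (init_a/init_b/init_c are placeholders, not kernel functions):

#include <stdio.h>

static int  init_a(void) { puts("init a"); return 0; }
static int  init_b(void) { puts("init b"); return 0; }
static int  init_c(void) { puts("init c"); return -1; /* simulate failure */ }
static void exit_a(void) { puts("exit a"); }
static void exit_b(void) { puts("exit b"); }

/* Cleanup labels listed in reverse order of the setup steps; each error
 * jump undoes only the steps that actually completed. */
static int subsystem_init(void)
{
    int rc;

    if ((rc = init_a()))
        goto out;
    if ((rc = init_b()))
        goto out_exit_a;
    if ((rc = init_c()))
        goto out_exit_b;
    return 0;

out_exit_b:
    exit_b();
out_exit_a:
    exit_a();
out:
    return rc;
}

int main(void)
{
    /* Prints: init a, init b, init c, exit b, exit a */
    return subsystem_init() ? 1 : 0;
}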
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c8a3723bc001..cfb5d3de9c84 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,4 +1,4 @@
1/* linux/net/inet/arp.c 1/* linux/net/ipv4/arp.c
2 * 2 *
3 * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $ 3 * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $
4 * 4 *
@@ -234,7 +234,7 @@ static u32 arp_hash(const void *pkey, const struct net_device *dev)
234 234
235static int arp_constructor(struct neighbour *neigh) 235static int arp_constructor(struct neighbour *neigh)
236{ 236{
237 u32 addr = *(u32*)neigh->primary_key; 237 __be32 addr = *(__be32*)neigh->primary_key;
238 struct net_device *dev = neigh->dev; 238 struct net_device *dev = neigh->dev;
239 struct in_device *in_dev; 239 struct in_device *in_dev;
240 struct neigh_parms *parms; 240 struct neigh_parms *parms;
@@ -330,10 +330,10 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
330 330
331static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) 331static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
332{ 332{
333 u32 saddr = 0; 333 __be32 saddr = 0;
334 u8 *dst_ha = NULL; 334 u8 *dst_ha = NULL;
335 struct net_device *dev = neigh->dev; 335 struct net_device *dev = neigh->dev;
336 u32 target = *(u32*)neigh->primary_key; 336 __be32 target = *(__be32*)neigh->primary_key;
337 int probes = atomic_read(&neigh->probes); 337 int probes = atomic_read(&neigh->probes);
338 struct in_device *in_dev = in_dev_get(dev); 338 struct in_device *in_dev = in_dev_get(dev);
339 339
@@ -385,7 +385,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
385} 385}
386 386
387static int arp_ignore(struct in_device *in_dev, struct net_device *dev, 387static int arp_ignore(struct in_device *in_dev, struct net_device *dev,
388 u32 sip, u32 tip) 388 __be32 sip, __be32 tip)
389{ 389{
390 int scope; 390 int scope;
391 391
@@ -420,7 +420,7 @@ static int arp_ignore(struct in_device *in_dev, struct net_device *dev,
420 return !inet_confirm_addr(dev, sip, tip, scope); 420 return !inet_confirm_addr(dev, sip, tip, scope);
421} 421}
422 422
423static int arp_filter(__u32 sip, __u32 tip, struct net_device *dev) 423static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
424{ 424{
425 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip, 425 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
426 .saddr = tip } } }; 426 .saddr = tip } } };
@@ -449,7 +449,7 @@ static int arp_filter(__u32 sip, __u32 tip, struct net_device *dev)
449 * is allowed to use this function, it is scheduled to be removed. --ANK 449 * is allowed to use this function, it is scheduled to be removed. --ANK
450 */ 450 */
451 451
452static int arp_set_predefined(int addr_hint, unsigned char * haddr, u32 paddr, struct net_device * dev) 452static int arp_set_predefined(int addr_hint, unsigned char * haddr, __be32 paddr, struct net_device * dev)
453{ 453{
454 switch (addr_hint) { 454 switch (addr_hint) {
455 case RTN_LOCAL: 455 case RTN_LOCAL:
@@ -470,7 +470,7 @@ static int arp_set_predefined(int addr_hint, unsigned char * haddr, u32 paddr, s
470int arp_find(unsigned char *haddr, struct sk_buff *skb) 470int arp_find(unsigned char *haddr, struct sk_buff *skb)
471{ 471{
472 struct net_device *dev = skb->dev; 472 struct net_device *dev = skb->dev;
473 u32 paddr; 473 __be32 paddr;
474 struct neighbour *n; 474 struct neighbour *n;
475 475
476 if (!skb->dst) { 476 if (!skb->dst) {
@@ -511,7 +511,7 @@ int arp_bind_neighbour(struct dst_entry *dst)
511 if (dev == NULL) 511 if (dev == NULL)
512 return -EINVAL; 512 return -EINVAL;
513 if (n == NULL) { 513 if (n == NULL) {
514 u32 nexthop = ((struct rtable*)dst)->rt_gateway; 514 __be32 nexthop = ((struct rtable*)dst)->rt_gateway;
515 if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) 515 if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT))
516 nexthop = 0; 516 nexthop = 0;
517 n = __neigh_lookup_errno( 517 n = __neigh_lookup_errno(
@@ -560,8 +560,8 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
560 * Create an arp packet. If (dest_hw == NULL), we create a broadcast 560 * Create an arp packet. If (dest_hw == NULL), we create a broadcast
561 * message. 561 * message.
562 */ 562 */
563struct sk_buff *arp_create(int type, int ptype, u32 dest_ip, 563struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
564 struct net_device *dev, u32 src_ip, 564 struct net_device *dev, __be32 src_ip,
565 unsigned char *dest_hw, unsigned char *src_hw, 565 unsigned char *dest_hw, unsigned char *src_hw,
566 unsigned char *target_hw) 566 unsigned char *target_hw)
567{ 567{
@@ -675,8 +675,8 @@ void arp_xmit(struct sk_buff *skb)
675/* 675/*
676 * Create and send an arp packet. 676 * Create and send an arp packet.
677 */ 677 */
678void arp_send(int type, int ptype, u32 dest_ip, 678void arp_send(int type, int ptype, __be32 dest_ip,
679 struct net_device *dev, u32 src_ip, 679 struct net_device *dev, __be32 src_ip,
680 unsigned char *dest_hw, unsigned char *src_hw, 680 unsigned char *dest_hw, unsigned char *src_hw,
681 unsigned char *target_hw) 681 unsigned char *target_hw)
682{ 682{
@@ -710,7 +710,7 @@ static int arp_process(struct sk_buff *skb)
710 unsigned char *arp_ptr; 710 unsigned char *arp_ptr;
711 struct rtable *rt; 711 struct rtable *rt;
712 unsigned char *sha, *tha; 712 unsigned char *sha, *tha;
713 u32 sip, tip; 713 __be32 sip, tip;
714 u16 dev_type = dev->type; 714 u16 dev_type = dev->type;
715 int addr_type; 715 int addr_type;
716 struct neighbour *n; 716 struct neighbour *n;
@@ -969,13 +969,13 @@ out_of_mem:
969 969
970static int arp_req_set(struct arpreq *r, struct net_device * dev) 970static int arp_req_set(struct arpreq *r, struct net_device * dev)
971{ 971{
972 u32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr; 972 __be32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr;
973 struct neighbour *neigh; 973 struct neighbour *neigh;
974 int err; 974 int err;
975 975
976 if (r->arp_flags&ATF_PUBL) { 976 if (r->arp_flags&ATF_PUBL) {
977 u32 mask = ((struct sockaddr_in *) &r->arp_netmask)->sin_addr.s_addr; 977 __be32 mask = ((struct sockaddr_in *) &r->arp_netmask)->sin_addr.s_addr;
978 if (mask && mask != 0xFFFFFFFF) 978 if (mask && mask != htonl(0xFFFFFFFF))
979 return -EINVAL; 979 return -EINVAL;
980 if (!dev && (r->arp_flags & ATF_COM)) { 980 if (!dev && (r->arp_flags & ATF_COM)) {
981 dev = dev_getbyhwaddr(r->arp_ha.sa_family, r->arp_ha.sa_data); 981 dev = dev_getbyhwaddr(r->arp_ha.sa_family, r->arp_ha.sa_data);
@@ -1063,7 +1063,7 @@ static unsigned arp_state_to_flags(struct neighbour *neigh)
1063 1063
1064static int arp_req_get(struct arpreq *r, struct net_device *dev) 1064static int arp_req_get(struct arpreq *r, struct net_device *dev)
1065{ 1065{
1066 u32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr; 1066 __be32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr;
1067 struct neighbour *neigh; 1067 struct neighbour *neigh;
1068 int err = -ENXIO; 1068 int err = -ENXIO;
1069 1069
@@ -1084,13 +1084,13 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1084static int arp_req_delete(struct arpreq *r, struct net_device * dev) 1084static int arp_req_delete(struct arpreq *r, struct net_device * dev)
1085{ 1085{
1086 int err; 1086 int err;
1087 u32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1087 __be32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1088 struct neighbour *neigh; 1088 struct neighbour *neigh;
1089 1089
1090 if (r->arp_flags & ATF_PUBL) { 1090 if (r->arp_flags & ATF_PUBL) {
1091 u32 mask = 1091 __be32 mask =
1092 ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr; 1092 ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;
1093 if (mask == 0xFFFFFFFF) 1093 if (mask == htonl(0xFFFFFFFF))
1094 return pneigh_delete(&arp_tbl, &ip, dev); 1094 return pneigh_delete(&arp_tbl, &ip, dev);
1095 if (mask == 0) { 1095 if (mask == 0) {
1096 if (dev == NULL) { 1096 if (dev == NULL) {
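Most of the arp.c churn is the u32 to __be32 conversion, and the netmask checks change from a bare 0xFFFFFFFF to htonl(0xFFFFFFFF). An all-ones word is numerically identical in either byte order, so behaviour is unchanged; the htonl() simply keeps the comparison type-correct for sparse. A minimal sketch of the published-entry netmask test, with a plain typedef standing in for __be32:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef uint32_t be32;   /* stand-in for the kernel's __be32 */

/* A published (proxy) ARP netmask must be either all-ones (a single
 * host) or zero; anything in between is rejected. */
static int valid_pub_mask(be32 mask)
{
    return mask == 0 || mask == htonl(0xFFFFFFFF);
}

int main(void)
{
    printf("%d %d\n", valid_pub_mask(htonl(0xFFFFFFFF)),
                      valid_pub_mask(htonl(0xFFFFFF00)));   /* 1 0 */
    return 0;
}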
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e6ce0b3ba62a..e2077a3aa8c0 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -43,6 +43,7 @@
43#include <net/tcp.h> 43#include <net/tcp.h>
44#include <net/netlabel.h> 44#include <net/netlabel.h>
45#include <net/cipso_ipv4.h> 45#include <net/cipso_ipv4.h>
46#include <asm/atomic.h>
46#include <asm/bug.h> 47#include <asm/bug.h>
47 48
48struct cipso_v4_domhsh_entry { 49struct cipso_v4_domhsh_entry {
@@ -79,7 +80,7 @@ struct cipso_v4_map_cache_entry {
79 unsigned char *key; 80 unsigned char *key;
80 size_t key_len; 81 size_t key_len;
81 82
82 struct netlbl_lsm_cache lsm_data; 83 struct netlbl_lsm_cache *lsm_data;
83 84
84 u32 activity; 85 u32 activity;
85 struct list_head list; 86 struct list_head list;
@@ -188,13 +189,14 @@ static void cipso_v4_doi_domhsh_free(struct rcu_head *entry)
188 * @entry: the entry to free 189 * @entry: the entry to free
189 * 190 *
190 * Description: 191 * Description:
191 * This function frees the memory associated with a cache entry. 192 * This function frees the memory associated with a cache entry including the
193 * LSM cache data if there are no longer any users, i.e. reference count == 0.
192 * 194 *
193 */ 195 */
194static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) 196static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
195{ 197{
196 if (entry->lsm_data.free) 198 if (entry->lsm_data)
197 entry->lsm_data.free(entry->lsm_data.data); 199 netlbl_secattr_cache_free(entry->lsm_data);
198 kfree(entry->key); 200 kfree(entry->key);
199 kfree(entry); 201 kfree(entry);
200} 202}
@@ -315,8 +317,8 @@ static int cipso_v4_cache_check(const unsigned char *key,
315 entry->key_len == key_len && 317 entry->key_len == key_len &&
316 memcmp(entry->key, key, key_len) == 0) { 318 memcmp(entry->key, key, key_len) == 0) {
317 entry->activity += 1; 319 entry->activity += 1;
318 secattr->cache.free = entry->lsm_data.free; 320 atomic_inc(&entry->lsm_data->refcount);
319 secattr->cache.data = entry->lsm_data.data; 321 secattr->cache = entry->lsm_data;
320 if (prev_entry == NULL) { 322 if (prev_entry == NULL) {
321 spin_unlock_bh(&cipso_v4_cache[bkt].lock); 323 spin_unlock_bh(&cipso_v4_cache[bkt].lock);
322 return 0; 324 return 0;
@@ -383,8 +385,8 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
383 memcpy(entry->key, cipso_ptr, cipso_ptr_len); 385 memcpy(entry->key, cipso_ptr, cipso_ptr_len);
384 entry->key_len = cipso_ptr_len; 386 entry->key_len = cipso_ptr_len;
385 entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); 387 entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
386 entry->lsm_data.free = secattr->cache.free; 388 atomic_inc(&secattr->cache->refcount);
387 entry->lsm_data.data = secattr->cache.data; 389 entry->lsm_data = secattr->cache;
388 390
389 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 391 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
390 spin_lock_bh(&cipso_v4_cache[bkt].lock); 392 spin_lock_bh(&cipso_v4_cache[bkt].lock);
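The cache hunks above replace an embedded (free callback, data) pair with a pointer to a shared, reference-counted netlbl_lsm_cache object: hits and inserts take a reference with atomic_inc(), and cipso_v4_cache_entry_free() drops it through netlbl_secattr_cache_free() so the LSM data is released only when the last user is gone. A standalone sketch of that get/put pattern (C11 atomics stand in for the kernel's atomic_t; all names here are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct lsm_cache {
    atomic_int refcount;
    void      *data;
    void     (*free_fn)(void *data);
};

/* Take a reference when another cache entry starts sharing the object. */
static struct lsm_cache *lsm_cache_get(struct lsm_cache *c)
{
    atomic_fetch_add(&c->refcount, 1);
    return c;
}

/* Drop a reference; the final put frees the payload and the object. */
static void lsm_cache_put(struct lsm_cache *c)
{
    if (atomic_fetch_sub(&c->refcount, 1) == 1) {
        if (c->free_fn)
            c->free_fn(c->data);
        free(c);
    }
}

static void free_payload(void *p) { free(p); }

int main(void)
{
    struct lsm_cache *c = malloc(sizeof(*c));

    atomic_init(&c->refcount, 1);   /* creator's reference */
    c->data = malloc(16);
    c->free_fn = free_payload;

    lsm_cache_get(c);               /* a cache entry shares it */
    lsm_cache_put(c);               /* cache entry dropped     */
    lsm_cache_put(c);               /* last put frees it       */
    return 0;
}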
@@ -474,6 +476,7 @@ doi_add_failure_rlock:
474/** 476/**
475 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine 477 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
476 * @doi: the DOI value 478 * @doi: the DOI value
479 * @audit_secid: the LSM secid to use in the audit message
477 * @callback: the DOI cleanup/free callback 480 * @callback: the DOI cleanup/free callback
478 * 481 *
479 * Description: 482 * Description:
@@ -483,7 +486,9 @@ doi_add_failure_rlock:
483 * success and negative values on failure. 486 * success and negative values on failure.
484 * 487 *
485 */ 488 */
486int cipso_v4_doi_remove(u32 doi, void (*callback) (struct rcu_head * head)) 489int cipso_v4_doi_remove(u32 doi,
490 struct netlbl_audit *audit_info,
491 void (*callback) (struct rcu_head * head))
487{ 492{
488 struct cipso_v4_doi *doi_def; 493 struct cipso_v4_doi *doi_def;
489 struct cipso_v4_domhsh_entry *dom_iter; 494 struct cipso_v4_domhsh_entry *dom_iter;
@@ -502,7 +507,8 @@ int cipso_v4_doi_remove(u32 doi, void (*callback) (struct rcu_head * head))
502 spin_unlock(&cipso_v4_doi_list_lock); 507 spin_unlock(&cipso_v4_doi_list_lock);
503 list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) 508 list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
504 if (dom_iter->valid) 509 if (dom_iter->valid)
505 netlbl_domhsh_remove(dom_iter->domain); 510 netlbl_domhsh_remove(dom_iter->domain,
511 audit_info);
506 cipso_v4_cache_invalidate(); 512 cipso_v4_cache_invalidate();
507 rcu_read_unlock(); 513 rcu_read_unlock();
508 514
@@ -767,13 +773,15 @@ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
767{ 773{
768 int cat = -1; 774 int cat = -1;
769 u32 bitmap_len_bits = bitmap_len * 8; 775 u32 bitmap_len_bits = bitmap_len * 8;
770 u32 cipso_cat_size = doi_def->map.std->cat.cipso_size; 776 u32 cipso_cat_size;
771 u32 *cipso_array = doi_def->map.std->cat.cipso; 777 u32 *cipso_array;
772 778
773 switch (doi_def->type) { 779 switch (doi_def->type) {
774 case CIPSO_V4_MAP_PASS: 780 case CIPSO_V4_MAP_PASS:
775 return 0; 781 return 0;
776 case CIPSO_V4_MAP_STD: 782 case CIPSO_V4_MAP_STD:
783 cipso_cat_size = doi_def->map.std->cat.cipso_size;
784 cipso_array = doi_def->map.std->cat.cipso;
777 for (;;) { 785 for (;;) {
778 cat = cipso_v4_bitmap_walk(bitmap, 786 cat = cipso_v4_bitmap_walk(bitmap,
779 bitmap_len_bits, 787 bitmap_len_bits,
@@ -819,19 +827,21 @@ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
819 u32 net_spot_max = 0; 827 u32 net_spot_max = 0;
820 u32 host_clen_bits = host_cat_len * 8; 828 u32 host_clen_bits = host_cat_len * 8;
821 u32 net_clen_bits = net_cat_len * 8; 829 u32 net_clen_bits = net_cat_len * 8;
822 u32 host_cat_size = doi_def->map.std->cat.local_size; 830 u32 host_cat_size;
823 u32 *host_cat_array = doi_def->map.std->cat.local; 831 u32 *host_cat_array;
824 832
825 switch (doi_def->type) { 833 switch (doi_def->type) {
826 case CIPSO_V4_MAP_PASS: 834 case CIPSO_V4_MAP_PASS:
827 net_spot_max = host_cat_len - 1; 835 net_spot_max = host_cat_len;
828 while (net_spot_max > 0 && host_cat[net_spot_max] == 0) 836 while (net_spot_max > 0 && host_cat[net_spot_max - 1] == 0)
829 net_spot_max--; 837 net_spot_max--;
830 if (net_spot_max > net_cat_len) 838 if (net_spot_max > net_cat_len)
831 return -EINVAL; 839 return -EINVAL;
832 memcpy(net_cat, host_cat, net_spot_max); 840 memcpy(net_cat, host_cat, net_spot_max);
833 return net_spot_max; 841 return net_spot_max;
834 case CIPSO_V4_MAP_STD: 842 case CIPSO_V4_MAP_STD:
843 host_cat_size = doi_def->map.std->cat.local_size;
844 host_cat_array = doi_def->map.std->cat.local;
835 for (;;) { 845 for (;;) {
836 host_spot = cipso_v4_bitmap_walk(host_cat, 846 host_spot = cipso_v4_bitmap_walk(host_cat,
837 host_clen_bits, 847 host_clen_bits,
@@ -887,8 +897,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
887 int net_spot = -1; 897 int net_spot = -1;
888 u32 net_clen_bits = net_cat_len * 8; 898 u32 net_clen_bits = net_cat_len * 8;
889 u32 host_clen_bits = host_cat_len * 8; 899 u32 host_clen_bits = host_cat_len * 8;
890 u32 net_cat_size = doi_def->map.std->cat.cipso_size; 900 u32 net_cat_size;
891 u32 *net_cat_array = doi_def->map.std->cat.cipso; 901 u32 *net_cat_array;
892 902
893 switch (doi_def->type) { 903 switch (doi_def->type) {
894 case CIPSO_V4_MAP_PASS: 904 case CIPSO_V4_MAP_PASS:
@@ -897,6 +907,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
897 memcpy(host_cat, net_cat, net_cat_len); 907 memcpy(host_cat, net_cat, net_cat_len);
898 return net_cat_len; 908 return net_cat_len;
899 case CIPSO_V4_MAP_STD: 909 case CIPSO_V4_MAP_STD:
910 net_cat_size = doi_def->map.std->cat.cipso_size;
911 net_cat_array = doi_def->map.std->cat.cipso;
900 for (;;) { 912 for (;;) {
901 net_spot = cipso_v4_bitmap_walk(net_cat, 913 net_spot = cipso_v4_bitmap_walk(net_cat,
902 net_clen_bits, 914 net_clen_bits,
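The remaining cipso_ipv4.c hunks move the doi_def->map.std dereferences inside the CIPSO_V4_MAP_STD case, so a PASS-type DOI never touches the standard-map pointer, and the PASS branch of the hton mapping now trims trailing zero category bytes without dropping the last non-zero byte. A tiny standalone version of the corrected trim (buffer and names are illustrative):

#include <stdio.h>

/* Return the number of meaningful bytes in buf[0..len), i.e. len with
 * trailing zero bytes trimmed; note the loop never reads buf[len]. */
static unsigned trimmed_len(const unsigned char *buf, unsigned len)
{
    while (len > 0 && buf[len - 1] == 0)
        len--;
    return len;
}

int main(void)
{
    unsigned char cat[8] = { 0x80, 0x40, 0, 0, 0, 0, 0, 0 };
    printf("%u\n", trimmed_len(cat, sizeof(cat)));   /* 2 */
    return 0;
}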
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index ec5da4fbd9f4..7b068a891953 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -25,7 +25,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
27 struct rtable *rt; 27 struct rtable *rt;
28 u32 saddr; 28 __be32 saddr;
29 int oif; 29 int oif;
30 int err; 30 int err;
31 31
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 8e8d1f17d77a..7602c79a389b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -224,7 +224,7 @@ static void inetdev_destroy(struct in_device *in_dev)
224 call_rcu(&in_dev->rcu_head, in_dev_rcu_put); 224 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
225} 225}
226 226
227int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b) 227int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
228{ 228{
229 rcu_read_lock(); 229 rcu_read_lock();
230 for_primary_ifa(in_dev) { 230 for_primary_ifa(in_dev) {
@@ -429,8 +429,8 @@ struct in_device *inetdev_by_index(int ifindex)
429 429
430/* Called only from RTNL semaphored context. No locks. */ 430/* Called only from RTNL semaphored context. No locks. */
431 431
432struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, 432struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
433 u32 mask) 433 __be32 mask)
434{ 434{
435 ASSERT_RTNL(); 435 ASSERT_RTNL();
436 436
@@ -467,7 +467,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
467 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; 467 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
468 ifap = &ifa->ifa_next) { 468 ifap = &ifa->ifa_next) {
469 if (tb[IFA_LOCAL] && 469 if (tb[IFA_LOCAL] &&
470 ifa->ifa_local != nla_get_u32(tb[IFA_LOCAL])) 470 ifa->ifa_local != nla_get_be32(tb[IFA_LOCAL]))
471 continue; 471 continue;
472 472
473 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) 473 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
@@ -475,7 +475,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
475 475
476 if (tb[IFA_ADDRESS] && 476 if (tb[IFA_ADDRESS] &&
477 (ifm->ifa_prefixlen != ifa->ifa_prefixlen || 477 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
478 !inet_ifa_match(nla_get_u32(tb[IFA_ADDRESS]), ifa))) 478 !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
479 continue; 479 continue;
480 480
481 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid); 481 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid);
@@ -540,14 +540,14 @@ static struct in_ifaddr *rtm_to_ifaddr(struct nlmsghdr *nlh)
540 ifa->ifa_scope = ifm->ifa_scope; 540 ifa->ifa_scope = ifm->ifa_scope;
541 ifa->ifa_dev = in_dev; 541 ifa->ifa_dev = in_dev;
542 542
543 ifa->ifa_local = nla_get_u32(tb[IFA_LOCAL]); 543 ifa->ifa_local = nla_get_be32(tb[IFA_LOCAL]);
544 ifa->ifa_address = nla_get_u32(tb[IFA_ADDRESS]); 544 ifa->ifa_address = nla_get_be32(tb[IFA_ADDRESS]);
545 545
546 if (tb[IFA_BROADCAST]) 546 if (tb[IFA_BROADCAST])
547 ifa->ifa_broadcast = nla_get_u32(tb[IFA_BROADCAST]); 547 ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);
548 548
549 if (tb[IFA_ANYCAST]) 549 if (tb[IFA_ANYCAST])
550 ifa->ifa_anycast = nla_get_u32(tb[IFA_ANYCAST]); 550 ifa->ifa_anycast = nla_get_be32(tb[IFA_ANYCAST]);
551 551
552 if (tb[IFA_LABEL]) 552 if (tb[IFA_LABEL])
553 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); 553 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
@@ -805,7 +805,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
805 break; 805 break;
806 ret = 0; 806 ret = 0;
807 if (ifa->ifa_mask != sin->sin_addr.s_addr) { 807 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
808 u32 old_mask = ifa->ifa_mask; 808 __be32 old_mask = ifa->ifa_mask;
809 inet_del_ifa(in_dev, ifap, 0); 809 inet_del_ifa(in_dev, ifap, 0);
810 ifa->ifa_mask = sin->sin_addr.s_addr; 810 ifa->ifa_mask = sin->sin_addr.s_addr;
811 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); 811 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
@@ -876,9 +876,9 @@ out:
876 return done; 876 return done;
877} 877}
878 878
879u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) 879__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
880{ 880{
881 u32 addr = 0; 881 __be32 addr = 0;
882 struct in_device *in_dev; 882 struct in_device *in_dev;
883 883
884 rcu_read_lock(); 884 rcu_read_lock();
@@ -927,11 +927,11 @@ out:
927 return addr; 927 return addr;
928} 928}
929 929
930static u32 confirm_addr_indev(struct in_device *in_dev, u32 dst, 930static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
931 u32 local, int scope) 931 __be32 local, int scope)
932{ 932{
933 int same = 0; 933 int same = 0;
934 u32 addr = 0; 934 __be32 addr = 0;
935 935
936 for_ifa(in_dev) { 936 for_ifa(in_dev) {
937 if (!addr && 937 if (!addr &&
@@ -971,9 +971,9 @@ static u32 confirm_addr_indev(struct in_device *in_dev, u32 dst,
971 * - local: address, 0=autoselect the local address 971 * - local: address, 0=autoselect the local address
972 * - scope: maximum allowed scope value for the local address 972 * - scope: maximum allowed scope value for the local address
973 */ 973 */
974u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scope) 974__be32 inet_confirm_addr(const struct net_device *dev, __be32 dst, __be32 local, int scope)
975{ 975{
976 u32 addr = 0; 976 __be32 addr = 0;
977 struct in_device *in_dev; 977 struct in_device *in_dev;
978 978
979 if (dev) { 979 if (dev) {
@@ -1138,16 +1138,16 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1138 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 1138 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1139 1139
1140 if (ifa->ifa_address) 1140 if (ifa->ifa_address)
1141 NLA_PUT_U32(skb, IFA_ADDRESS, ifa->ifa_address); 1141 NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address);
1142 1142
1143 if (ifa->ifa_local) 1143 if (ifa->ifa_local)
1144 NLA_PUT_U32(skb, IFA_LOCAL, ifa->ifa_local); 1144 NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local);
1145 1145
1146 if (ifa->ifa_broadcast) 1146 if (ifa->ifa_broadcast)
1147 NLA_PUT_U32(skb, IFA_BROADCAST, ifa->ifa_broadcast); 1147 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
1148 1148
1149 if (ifa->ifa_anycast) 1149 if (ifa->ifa_anycast)
1150 NLA_PUT_U32(skb, IFA_ANYCAST, ifa->ifa_anycast); 1150 NLA_PUT_BE32(skb, IFA_ANYCAST, ifa->ifa_anycast);
1151 1151
1152 if (ifa->ifa_label[0]) 1152 if (ifa->ifa_label[0])
1153 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); 1153 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
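The devinet.c hunks swap nla_get_u32()/NLA_PUT_U32() for the _be32 variants. The attribute payload on the wire is the same 4 bytes; the _be32 accessors in this tree simply keep the value typed as __be32 end to end, since IFA_LOCAL, IFA_ADDRESS, IFA_BROADCAST and IFA_ANYCAST carry IPv4 addresses in network byte order. A rough userspace sketch of what such an accessor amounts to (this is not the kernel netlink API, only the byte handling):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

typedef uint32_t be32;   /* stand-in for __be32 */

/* Read a 4-byte attribute payload without changing its byte order:
 * an IPv4 address attribute stays big-endian in memory. */
static be32 attr_get_be32(const unsigned char *payload)
{
    be32 v;

    memcpy(&v, payload, sizeof(v));
    return v;
}

int main(void)
{
    unsigned char payload[4] = { 192, 0, 2, 1 };   /* 192.0.2.1 on the wire */
    struct in_addr a = { .s_addr = attr_get_be32(payload) };
    printf("%s\n", inet_ntoa(a));
    return 0;
}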
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 13b29360d102..b5c205b57669 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -253,7 +253,8 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
253 * as per draft-ietf-ipsec-udp-encaps-06, 253 * as per draft-ietf-ipsec-udp-encaps-06,
254 * section 3.1.2 254 * section 3.1.2
255 */ 255 */
256 if (x->props.mode == XFRM_MODE_TRANSPORT) 256 if (x->props.mode == XFRM_MODE_TRANSPORT ||
257 x->props.mode == XFRM_MODE_BEET)
257 skb->ip_summed = CHECKSUM_UNNECESSARY; 258 skb->ip_summed = CHECKSUM_UNNECESSARY;
258 } 259 }
259 260
@@ -271,17 +272,28 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
271{ 272{
272 struct esp_data *esp = x->data; 273 struct esp_data *esp = x->data;
273 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 274 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
274 275 int enclen = 0;
275 if (x->props.mode == XFRM_MODE_TUNNEL) { 276
276 mtu = ALIGN(mtu + 2, blksize); 277 switch (x->props.mode) {
277 } else { 278 case XFRM_MODE_TUNNEL:
278 /* The worst case. */ 279 mtu = ALIGN(mtu +2, blksize);
280 break;
281 default:
282 case XFRM_MODE_TRANSPORT:
283 /* The worst case */
279 mtu = ALIGN(mtu + 2, 4) + blksize - 4; 284 mtu = ALIGN(mtu + 2, 4) + blksize - 4;
285 break;
286 case XFRM_MODE_BEET:
287 /* The worst case. */
288 enclen = IPV4_BEET_PHMAXLEN;
289 mtu = ALIGN(mtu + enclen + 2, blksize);
290 break;
280 } 291 }
292
281 if (esp->conf.padlen) 293 if (esp->conf.padlen)
282 mtu = ALIGN(mtu, esp->conf.padlen); 294 mtu = ALIGN(mtu, esp->conf.padlen);
283 295
284 return mtu + x->props.header_len + esp->auth.icv_trunc_len; 296 return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
285} 297}
286 298
287static void esp4_err(struct sk_buff *skb, u32 info) 299static void esp4_err(struct sk_buff *skb, u32 info)
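esp4_get_max_size() becomes a switch over the transform mode so BEET can account for its pseudo-header bound (IPV4_BEET_PHMAXLEN) before rounding up to the cipher block size, while transport mode keeps the old worst-case formula. A worked example of the rounding arithmetic only (not the kernel function), assuming a 16-byte block size and the same round-up-to-power-of-two behaviour as the kernel's ALIGN():

#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two),
 * mirroring what the kernel's ALIGN() macro does. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned mtu = 1500, blksize = 16;

    /* tunnel-style: payload plus 2 trailer bytes, rounded to the block */
    unsigned tunnel    = ALIGN_UP(mtu + 2, blksize);
    /* transport-style worst case from the hunk above */
    unsigned transport = ALIGN_UP(mtu + 2, 4) + blksize - 4;

    printf("tunnel=%u transport=%u\n", tunnel, transport);
    return 0;
}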
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cfb527c060e4..af0190d8b6c0 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -122,7 +122,7 @@ static void fib_flush(void)
122 * Find the first device with a given source address. 122 * Find the first device with a given source address.
123 */ 123 */
124 124
125struct net_device * ip_dev_find(u32 addr) 125struct net_device * ip_dev_find(__be32 addr)
126{ 126{
127 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; 127 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
128 struct fib_result res; 128 struct fib_result res;
@@ -146,7 +146,7 @@ out:
146 return dev; 146 return dev;
147} 147}
148 148
149unsigned inet_addr_type(u32 addr) 149unsigned inet_addr_type(__be32 addr)
150{ 150{
151 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; 151 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
152 struct fib_result res; 152 struct fib_result res;
@@ -180,8 +180,8 @@ unsigned inet_addr_type(u32 addr)
180 - check, that packet arrived from expected physical interface. 180 - check, that packet arrived from expected physical interface.
181 */ 181 */
182 182
183int fib_validate_source(u32 src, u32 dst, u8 tos, int oif, 183int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
184 struct net_device *dev, u32 *spec_dst, u32 *itag) 184 struct net_device *dev, __be32 *spec_dst, u32 *itag)
185{ 185{
186 struct in_device *in_dev; 186 struct in_device *in_dev;
187 struct flowi fl = { .nl_u = { .ip4_u = 187 struct flowi fl = { .nl_u = { .ip4_u =
@@ -253,7 +253,7 @@ e_inval:
253 253
254#ifndef CONFIG_IP_NOSIOCRT 254#ifndef CONFIG_IP_NOSIOCRT
255 255
256static inline u32 sk_extract_addr(struct sockaddr *addr) 256static inline __be32 sk_extract_addr(struct sockaddr *addr)
257{ 257{
258 return ((struct sockaddr_in *) addr)->sin_addr.s_addr; 258 return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
259} 259}
@@ -273,7 +273,7 @@ static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
273static int rtentry_to_fib_config(int cmd, struct rtentry *rt, 273static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
274 struct fib_config *cfg) 274 struct fib_config *cfg)
275{ 275{
276 u32 addr; 276 __be32 addr;
277 int plen; 277 int plen;
278 278
279 memset(cfg, 0, sizeof(*cfg)); 279 memset(cfg, 0, sizeof(*cfg));
@@ -292,7 +292,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
292 plen = 32; 292 plen = 32;
293 addr = sk_extract_addr(&rt->rt_dst); 293 addr = sk_extract_addr(&rt->rt_dst);
294 if (!(rt->rt_flags & RTF_HOST)) { 294 if (!(rt->rt_flags & RTF_HOST)) {
295 u32 mask = sk_extract_addr(&rt->rt_genmask); 295 __be32 mask = sk_extract_addr(&rt->rt_genmask);
296 296
297 if (rt->rt_genmask.sa_family != AF_INET) { 297 if (rt->rt_genmask.sa_family != AF_INET) {
298 if (mask || rt->rt_genmask.sa_family) 298 if (mask || rt->rt_genmask.sa_family)
@@ -482,9 +482,7 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
482 memset(cfg, 0, sizeof(*cfg)); 482 memset(cfg, 0, sizeof(*cfg));
483 483
484 rtm = nlmsg_data(nlh); 484 rtm = nlmsg_data(nlh);
485 cfg->fc_family = rtm->rtm_family;
486 cfg->fc_dst_len = rtm->rtm_dst_len; 485 cfg->fc_dst_len = rtm->rtm_dst_len;
487 cfg->fc_src_len = rtm->rtm_src_len;
488 cfg->fc_tos = rtm->rtm_tos; 486 cfg->fc_tos = rtm->rtm_tos;
489 cfg->fc_table = rtm->rtm_table; 487 cfg->fc_table = rtm->rtm_table;
490 cfg->fc_protocol = rtm->rtm_protocol; 488 cfg->fc_protocol = rtm->rtm_protocol;
@@ -499,22 +497,19 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
499 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) { 497 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
500 switch (attr->nla_type) { 498 switch (attr->nla_type) {
501 case RTA_DST: 499 case RTA_DST:
502 cfg->fc_dst = nla_get_u32(attr); 500 cfg->fc_dst = nla_get_be32(attr);
503 break;
504 case RTA_SRC:
505 cfg->fc_src = nla_get_u32(attr);
506 break; 501 break;
507 case RTA_OIF: 502 case RTA_OIF:
508 cfg->fc_oif = nla_get_u32(attr); 503 cfg->fc_oif = nla_get_u32(attr);
509 break; 504 break;
510 case RTA_GATEWAY: 505 case RTA_GATEWAY:
511 cfg->fc_gw = nla_get_u32(attr); 506 cfg->fc_gw = nla_get_be32(attr);
512 break; 507 break;
513 case RTA_PRIORITY: 508 case RTA_PRIORITY:
514 cfg->fc_priority = nla_get_u32(attr); 509 cfg->fc_priority = nla_get_u32(attr);
515 break; 510 break;
516 case RTA_PREFSRC: 511 case RTA_PREFSRC:
517 cfg->fc_prefsrc = nla_get_u32(attr); 512 cfg->fc_prefsrc = nla_get_be32(attr);
518 break; 513 break;
519 case RTA_METRICS: 514 case RTA_METRICS:
520 cfg->fc_mx = nla_data(attr); 515 cfg->fc_mx = nla_data(attr);
@@ -627,8 +622,7 @@ out:
627 only when netlink is already locked. 622 only when netlink is already locked.
628 */ 623 */
629 624
630static void fib_magic(int cmd, int type, u32 dst, int dst_len, 625static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
631 struct in_ifaddr *ifa)
632{ 626{
633 struct fib_table *tb; 627 struct fib_table *tb;
634 struct fib_config cfg = { 628 struct fib_config cfg = {
@@ -667,9 +661,9 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
667 struct in_device *in_dev = ifa->ifa_dev; 661 struct in_device *in_dev = ifa->ifa_dev;
668 struct net_device *dev = in_dev->dev; 662 struct net_device *dev = in_dev->dev;
669 struct in_ifaddr *prim = ifa; 663 struct in_ifaddr *prim = ifa;
670 u32 mask = ifa->ifa_mask; 664 __be32 mask = ifa->ifa_mask;
671 u32 addr = ifa->ifa_local; 665 __be32 addr = ifa->ifa_local;
672 u32 prefix = ifa->ifa_address&mask; 666 __be32 prefix = ifa->ifa_address&mask;
673 667
674 if (ifa->ifa_flags&IFA_F_SECONDARY) { 668 if (ifa->ifa_flags&IFA_F_SECONDARY) {
675 prim = inet_ifa_byprefix(in_dev, prefix, mask); 669 prim = inet_ifa_byprefix(in_dev, prefix, mask);
@@ -685,7 +679,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
685 return; 679 return;
686 680
687 /* Add broadcast address, if it is explicitly assigned. */ 681 /* Add broadcast address, if it is explicitly assigned. */
688 if (ifa->ifa_broadcast && ifa->ifa_broadcast != 0xFFFFFFFF) 682 if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
689 fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); 683 fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
690 684
691 if (!ZERONET(prefix) && !(ifa->ifa_flags&IFA_F_SECONDARY) && 685 if (!ZERONET(prefix) && !(ifa->ifa_flags&IFA_F_SECONDARY) &&
@@ -707,8 +701,8 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
707 struct net_device *dev = in_dev->dev; 701 struct net_device *dev = in_dev->dev;
708 struct in_ifaddr *ifa1; 702 struct in_ifaddr *ifa1;
709 struct in_ifaddr *prim = ifa; 703 struct in_ifaddr *prim = ifa;
710 u32 brd = ifa->ifa_address|~ifa->ifa_mask; 704 __be32 brd = ifa->ifa_address|~ifa->ifa_mask;
711 u32 any = ifa->ifa_address&ifa->ifa_mask; 705 __be32 any = ifa->ifa_address&ifa->ifa_mask;
712#define LOCAL_OK 1 706#define LOCAL_OK 1
713#define BRD_OK 2 707#define BRD_OK 2
714#define BRD0_OK 4 708#define BRD0_OK 4
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 88133b383dc5..107bb6cbb0b3 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -51,7 +51,7 @@ static kmem_cache_t *fn_alias_kmem __read_mostly;
51struct fib_node { 51struct fib_node {
52 struct hlist_node fn_hash; 52 struct hlist_node fn_hash;
53 struct list_head fn_alias; 53 struct list_head fn_alias;
54 u32 fn_key; 54 __be32 fn_key;
55}; 55};
56 56
57struct fn_zone { 57struct fn_zone {
@@ -64,7 +64,7 @@ struct fn_zone {
64#define FZ_HASHMASK(fz) ((fz)->fz_hashmask) 64#define FZ_HASHMASK(fz) ((fz)->fz_hashmask)
65 65
66 int fz_order; /* Zone order */ 66 int fz_order; /* Zone order */
67 u32 fz_mask; 67 __be32 fz_mask;
68#define FZ_MASK(fz) ((fz)->fz_mask) 68#define FZ_MASK(fz) ((fz)->fz_mask)
69}; 69};
70 70
@@ -77,7 +77,7 @@ struct fn_hash {
77 struct fn_zone *fn_zone_list; 77 struct fn_zone *fn_zone_list;
78}; 78};
79 79
80static inline u32 fn_hash(u32 key, struct fn_zone *fz) 80static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
81{ 81{
82 u32 h = ntohl(key)>>(32 - fz->fz_order); 82 u32 h = ntohl(key)>>(32 - fz->fz_order);
83 h ^= (h>>20); 83 h ^= (h>>20);
@@ -87,7 +87,7 @@ static inline u32 fn_hash(u32 key, struct fn_zone *fz)
87 return h; 87 return h;
88} 88}
89 89
90static inline u32 fz_key(u32 dst, struct fn_zone *fz) 90static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
91{ 91{
92 return dst & FZ_MASK(fz); 92 return dst & FZ_MASK(fz);
93} 93}
@@ -254,7 +254,7 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
254 struct hlist_head *head; 254 struct hlist_head *head;
255 struct hlist_node *node; 255 struct hlist_node *node;
256 struct fib_node *f; 256 struct fib_node *f;
257 u32 k = fz_key(flp->fl4_dst, fz); 257 __be32 k = fz_key(flp->fl4_dst, fz);
258 258
259 head = &fz->fz_hash[fn_hash(k, fz)]; 259 head = &fz->fz_hash[fn_hash(k, fz)];
260 hlist_for_each_entry(f, node, head, fn_hash) { 260 hlist_for_each_entry(f, node, head, fn_hash) {
@@ -365,7 +365,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
365} 365}
366 366
367/* Return the node in FZ matching KEY. */ 367/* Return the node in FZ matching KEY. */
368static struct fib_node *fib_find_node(struct fn_zone *fz, u32 key) 368static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
369{ 369{
370 struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)]; 370 struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
371 struct hlist_node *node; 371 struct hlist_node *node;
@@ -387,7 +387,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
387 struct fn_zone *fz; 387 struct fn_zone *fz;
388 struct fib_info *fi; 388 struct fib_info *fi;
389 u8 tos = cfg->fc_tos; 389 u8 tos = cfg->fc_tos;
390 u32 key; 390 __be32 key;
391 int err; 391 int err;
392 392
393 if (cfg->fc_dst_len > 32) 393 if (cfg->fc_dst_len > 32)
@@ -541,7 +541,7 @@ static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg)
541 struct fib_node *f; 541 struct fib_node *f;
542 struct fib_alias *fa, *fa_to_delete; 542 struct fib_alias *fa, *fa_to_delete;
543 struct fn_zone *fz; 543 struct fn_zone *fz;
544 u32 key; 544 __be32 key;
545 545
546 if (cfg->fc_dst_len > 32) 546 if (cfg->fc_dst_len > 32)
547 return -EINVAL; 547 return -EINVAL;
@@ -966,7 +966,7 @@ static void fib_seq_stop(struct seq_file *seq, void *v)
966 read_unlock(&fib_hash_lock); 966 read_unlock(&fib_hash_lock);
967} 967}
968 968
969static unsigned fib_flag_trans(int type, u32 mask, struct fib_info *fi) 969static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
970{ 970{
971 static const unsigned type2flags[RTN_MAX + 1] = { 971 static const unsigned type2flags[RTN_MAX + 1] = {
972 [7] = RTF_REJECT, [8] = RTF_REJECT, 972 [7] = RTF_REJECT, [8] = RTF_REJECT,
@@ -975,7 +975,7 @@ static unsigned fib_flag_trans(int type, u32 mask, struct fib_info *fi)
975 975
976 if (fi && fi->fib_nh->nh_gw) 976 if (fi && fi->fib_nh->nh_gw)
977 flags |= RTF_GATEWAY; 977 flags |= RTF_GATEWAY;
978 if (mask == 0xFFFFFFFF) 978 if (mask == htonl(0xFFFFFFFF))
979 flags |= RTF_HOST; 979 flags |= RTF_HOST;
980 flags |= RTF_UP; 980 flags |= RTF_UP;
981 return flags; 981 return flags;
@@ -991,7 +991,7 @@ static int fib_seq_show(struct seq_file *seq, void *v)
991{ 991{
992 struct fib_iter_state *iter; 992 struct fib_iter_state *iter;
993 char bf[128]; 993 char bf[128];
994 u32 prefix, mask; 994 __be32 prefix, mask;
995 unsigned flags; 995 unsigned flags;
996 struct fib_node *f; 996 struct fib_node *f;
997 struct fib_alias *fa; 997 struct fib_alias *fa;
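fn_hash() now takes a __be32 key: it converts with ntohl() so the top fz_order bits of the prefix select the bucket, then xor-folds the value to spread entries across the table. A standalone copy of that mixing step (the fold shifts follow the kernel function; the order and hashmask here are illustrative parameters):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Hash a big-endian prefix key into one of (hashmask + 1) buckets. */
static uint32_t prefix_hash(uint32_t key_be, int order, uint32_t hashmask)
{
    uint32_t h = ntohl(key_be) >> (32 - order);

    h ^= (h >> 20);
    h ^= (h >> 10);
    h ^= (h >> 5);
    return h & hashmask;
}

int main(void)
{
    uint32_t key = htonl(0xC0A80100);            /* 192.168.1.0 */
    printf("bucket=%u\n", prefix_hash(key, 24, 15));
    return 0;
}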
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index fd6f7769f8ab..0e8b70bad4e1 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -20,16 +20,16 @@ struct fib_alias {
20/* Exported by fib_semantics.c */ 20/* Exported by fib_semantics.c */
21extern int fib_semantic_match(struct list_head *head, 21extern int fib_semantic_match(struct list_head *head,
22 const struct flowi *flp, 22 const struct flowi *flp,
23 struct fib_result *res, __u32 zone, __u32 mask, 23 struct fib_result *res, __be32 zone, __be32 mask,
24 int prefixlen); 24 int prefixlen);
25extern void fib_release_info(struct fib_info *); 25extern void fib_release_info(struct fib_info *);
26extern struct fib_info *fib_create_info(struct fib_config *cfg); 26extern struct fib_info *fib_create_info(struct fib_config *cfg);
27extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); 27extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
28extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 28extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
29 u32 tb_id, u8 type, u8 scope, u32 dst, 29 u32 tb_id, u8 type, u8 scope, __be32 dst,
30 int dst_len, u8 tos, struct fib_info *fi, 30 int dst_len, u8 tos, struct fib_info *fi,
31 unsigned int); 31 unsigned int);
32extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa, 32extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
33 int dst_len, u32 tb_id, struct nl_info *info); 33 int dst_len, u32 tb_id, struct nl_info *info);
34extern struct fib_alias *fib_find_alias(struct list_head *fah, 34extern struct fib_alias *fib_find_alias(struct list_head *fah,
35 u8 tos, u32 prio); 35 u8 tos, u32 prio);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 52b2adae4f22..0852b9cd065a 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -40,10 +40,10 @@ struct fib4_rule
40 u8 dst_len; 40 u8 dst_len;
41 u8 src_len; 41 u8 src_len;
42 u8 tos; 42 u8 tos;
43 u32 src; 43 __be32 src;
44 u32 srcmask; 44 __be32 srcmask;
45 u32 dst; 45 __be32 dst;
46 u32 dstmask; 46 __be32 dstmask;
47#ifdef CONFIG_IP_ROUTE_FWMARK 47#ifdef CONFIG_IP_ROUTE_FWMARK
48 u32 fwmark; 48 u32 fwmark;
49 u32 fwmask; 49 u32 fwmask;
@@ -150,8 +150,8 @@ void fib_select_default(const struct flowi *flp, struct fib_result *res)
150static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) 150static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
151{ 151{
152 struct fib4_rule *r = (struct fib4_rule *) rule; 152 struct fib4_rule *r = (struct fib4_rule *) rule;
153 u32 daddr = fl->fl4_dst; 153 __be32 daddr = fl->fl4_dst;
154 u32 saddr = fl->fl4_src; 154 __be32 saddr = fl->fl4_src;
155 155
156 if (((saddr ^ r->src) & r->srcmask) || 156 if (((saddr ^ r->src) & r->srcmask) ||
157 ((daddr ^ r->dst) & r->dstmask)) 157 ((daddr ^ r->dst) & r->dstmask))
@@ -215,10 +215,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
215 } 215 }
216 216
217 if (tb[FRA_SRC]) 217 if (tb[FRA_SRC])
218 rule4->src = nla_get_u32(tb[FRA_SRC]); 218 rule4->src = nla_get_be32(tb[FRA_SRC]);
219 219
220 if (tb[FRA_DST]) 220 if (tb[FRA_DST])
221 rule4->dst = nla_get_u32(tb[FRA_DST]); 221 rule4->dst = nla_get_be32(tb[FRA_DST]);
222 222
223#ifdef CONFIG_IP_ROUTE_FWMARK 223#ifdef CONFIG_IP_ROUTE_FWMARK
224 if (tb[FRA_FWMARK]) { 224 if (tb[FRA_FWMARK]) {
@@ -277,10 +277,10 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
277 return 0; 277 return 0;
278#endif 278#endif
279 279
280 if (tb[FRA_SRC] && (rule4->src != nla_get_u32(tb[FRA_SRC]))) 280 if (tb[FRA_SRC] && (rule4->src != nla_get_be32(tb[FRA_SRC])))
281 return 0; 281 return 0;
282 282
283 if (tb[FRA_DST] && (rule4->dst != nla_get_u32(tb[FRA_DST]))) 283 if (tb[FRA_DST] && (rule4->dst != nla_get_be32(tb[FRA_DST])))
284 return 0; 284 return 0;
285 285
286 return 1; 286 return 1;
@@ -305,10 +305,10 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
305#endif 305#endif
306 306
307 if (rule4->dst_len) 307 if (rule4->dst_len)
308 NLA_PUT_U32(skb, FRA_DST, rule4->dst); 308 NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
309 309
310 if (rule4->src_len) 310 if (rule4->src_len)
311 NLA_PUT_U32(skb, FRA_SRC, rule4->src); 311 NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
312 312
313#ifdef CONFIG_NET_CLS_ROUTE 313#ifdef CONFIG_NET_CLS_ROUTE
314 if (rule4->tclassid) 314 if (rule4->tclassid)
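fib4_rule_match() keeps the addresses as __be32 and tests them with an XOR-and-mask: (addr ^ rule_addr) & mask is zero exactly when the two addresses agree in every bit the mask covers, i.e. when the address falls inside the rule's prefix. A small standalone version:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* True if addr lies inside prefix/mask (all values big-endian). */
static int in_prefix(uint32_t addr_be, uint32_t prefix_be, uint32_t mask_be)
{
    return ((addr_be ^ prefix_be) & mask_be) == 0;
}

int main(void)
{
    uint32_t net  = htonl(0x0A000000);   /* 10.0.0.0 */
    uint32_t mask = htonl(0xFF000000);   /* /8       */

    printf("%d\n", in_prefix(htonl(0x0A010203), net, mask));  /* 1: 10.1.2.3 */
    printf("%d\n", in_prefix(htonl(0x0B010203), net, mask));  /* 0: 11.1.2.3 */
    return 0;
}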
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2ead09543f68..884d176e0082 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -203,7 +203,7 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
203 unsigned int val = fi->fib_nhs; 203 unsigned int val = fi->fib_nhs;
204 204
205 val ^= fi->fib_protocol; 205 val ^= fi->fib_protocol;
206 val ^= fi->fib_prefsrc; 206 val ^= (__force u32)fi->fib_prefsrc;
207 val ^= fi->fib_priority; 207 val ^= fi->fib_priority;
208 208
209 return (val ^ (val >> 7) ^ (val >> 12)) & mask; 209 return (val ^ (val >> 7) ^ (val >> 12)) & mask;
@@ -248,7 +248,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
248 Used only by redirect accept routine. 248 Used only by redirect accept routine.
249 */ 249 */
250 250
251int ip_fib_check_default(u32 gw, struct net_device *dev) 251int ip_fib_check_default(__be32 gw, struct net_device *dev)
252{ 252{
253 struct hlist_head *head; 253 struct hlist_head *head;
254 struct hlist_node *node; 254 struct hlist_node *node;
@@ -273,7 +273,7 @@ int ip_fib_check_default(u32 gw, struct net_device *dev)
273 return -1; 273 return -1;
274} 274}
275 275
276void rtmsg_fib(int event, u32 key, struct fib_alias *fa, 276void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
277 int dst_len, u32 tb_id, struct nl_info *info) 277 int dst_len, u32 tb_id, struct nl_info *info)
278{ 278{
279 struct sk_buff *skb; 279 struct sk_buff *skb;
@@ -374,7 +374,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
374 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 374 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
375 375
376 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 376 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
377 nh->nh_gw = nla ? nla_get_u32(nla) : 0; 377 nh->nh_gw = nla ? nla_get_be32(nla) : 0;
378#ifdef CONFIG_NET_CLS_ROUTE 378#ifdef CONFIG_NET_CLS_ROUTE
379 nla = nla_find(attrs, attrlen, RTA_FLOW); 379 nla = nla_find(attrs, attrlen, RTA_FLOW);
380 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 380 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
@@ -427,7 +427,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
427 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 427 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
428 428
429 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 429 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
430 if (nla && nla_get_u32(nla) != nh->nh_gw) 430 if (nla && nla_get_be32(nla) != nh->nh_gw)
431 return 1; 431 return 1;
432#ifdef CONFIG_NET_CLS_ROUTE 432#ifdef CONFIG_NET_CLS_ROUTE
433 nla = nla_find(attrs, attrlen, RTA_FLOW); 433 nla = nla_find(attrs, attrlen, RTA_FLOW);
@@ -568,11 +568,11 @@ out:
568 return 0; 568 return 0;
569} 569}
570 570
571static inline unsigned int fib_laddr_hashfn(u32 val) 571static inline unsigned int fib_laddr_hashfn(__be32 val)
572{ 572{
573 unsigned int mask = (fib_hash_size - 1); 573 unsigned int mask = (fib_hash_size - 1);
574 574
575 return (val ^ (val >> 7) ^ (val >> 14)) & mask; 575 return ((__force u32)val ^ ((__force u32)val >> 7) ^ ((__force u32)val >> 14)) & mask;
576} 576}
577 577
578static struct hlist_head *fib_hash_alloc(int bytes) 578static struct hlist_head *fib_hash_alloc(int bytes)
@@ -847,7 +847,7 @@ failure:
847 847
848/* Note! fib_semantic_match intentionally uses RCU list functions. */ 848/* Note! fib_semantic_match intentionally uses RCU list functions. */
849int fib_semantic_match(struct list_head *head, const struct flowi *flp, 849int fib_semantic_match(struct list_head *head, const struct flowi *flp,
850 struct fib_result *res, __u32 zone, __u32 mask, 850 struct fib_result *res, __be32 zone, __be32 mask,
851 int prefixlen) 851 int prefixlen)
852{ 852{
853 struct fib_alias *fa; 853 struct fib_alias *fa;
@@ -914,8 +914,7 @@ out_fill_res:
914 res->fi = fa->fa_info; 914 res->fi = fa->fa_info;
915#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED 915#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
916 res->netmask = mask; 916 res->netmask = mask;
917 res->network = zone & 917 res->network = zone & inet_make_mask(prefixlen);
918 (0xFFFFFFFF >> (32 - prefixlen));
919#endif 918#endif
920 atomic_inc(&res->fi->fib_clntref); 919 atomic_inc(&res->fi->fib_clntref);
921 return 0; 920 return 0;
@@ -923,13 +922,13 @@ out_fill_res:
923 922
924/* Find appropriate source address to this destination */ 923/* Find appropriate source address to this destination */
925 924
926u32 __fib_res_prefsrc(struct fib_result *res) 925__be32 __fib_res_prefsrc(struct fib_result *res)
927{ 926{
928 return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope); 927 return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
929} 928}
930 929
931int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 930int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
932 u32 tb_id, u8 type, u8 scope, u32 dst, int dst_len, u8 tos, 931 u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
933 struct fib_info *fi, unsigned int flags) 932 struct fib_info *fi, unsigned int flags)
934{ 933{
935 struct nlmsghdr *nlh; 934 struct nlmsghdr *nlh;
@@ -952,7 +951,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
952 rtm->rtm_protocol = fi->fib_protocol; 951 rtm->rtm_protocol = fi->fib_protocol;
953 952
954 if (rtm->rtm_dst_len) 953 if (rtm->rtm_dst_len)
955 NLA_PUT_U32(skb, RTA_DST, dst); 954 NLA_PUT_BE32(skb, RTA_DST, dst);
956 955
957 if (fi->fib_priority) 956 if (fi->fib_priority)
958 NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority); 957 NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
@@ -961,11 +960,11 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
961 goto nla_put_failure; 960 goto nla_put_failure;
962 961
963 if (fi->fib_prefsrc) 962 if (fi->fib_prefsrc)
964 NLA_PUT_U32(skb, RTA_PREFSRC, fi->fib_prefsrc); 963 NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
965 964
966 if (fi->fib_nhs == 1) { 965 if (fi->fib_nhs == 1) {
967 if (fi->fib_nh->nh_gw) 966 if (fi->fib_nh->nh_gw)
968 NLA_PUT_U32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw); 967 NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
969 968
970 if (fi->fib_nh->nh_oif) 969 if (fi->fib_nh->nh_oif)
971 NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); 970 NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
@@ -993,7 +992,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
993 rtnh->rtnh_ifindex = nh->nh_oif; 992 rtnh->rtnh_ifindex = nh->nh_oif;
994 993
995 if (nh->nh_gw) 994 if (nh->nh_gw)
996 NLA_PUT_U32(skb, RTA_GATEWAY, nh->nh_gw); 995 NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
997#ifdef CONFIG_NET_CLS_ROUTE 996#ifdef CONFIG_NET_CLS_ROUTE
998 if (nh->nh_tclassid) 997 if (nh->nh_tclassid)
999 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); 998 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
@@ -1018,7 +1017,7 @@ nla_put_failure:
1018 - device went down -> we must shutdown all nexthops going via it. 1017 - device went down -> we must shutdown all nexthops going via it.
1019 */ 1018 */
1020 1019
1021int fib_sync_down(u32 local, struct net_device *dev, int force) 1020int fib_sync_down(__be32 local, struct net_device *dev, int force)
1022{ 1021{
1023 int ret = 0; 1022 int ret = 0;
1024 int scope = RT_SCOPE_NOWHERE; 1023 int scope = RT_SCOPE_NOWHERE;
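Two details in the fib_semantics.c hunks: the hash functions cast __be32 values through (__force u32) so sparse accepts mixing them into host-order arithmetic (the bits are unchanged, only the annotation is dropped), and the prefix computation replaces the open-coded `zone & (0xFFFFFFFF >> (32 - prefixlen))` with inet_make_mask(prefixlen), which yields the netmask directly in network byte order and avoids an undefined 32-bit shift when prefixlen is 0. A sketch of the mask construction, mirroring what inet_make_mask() does:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Build an IPv4 netmask in network byte order from a prefix length;
 * the explicit 0 case avoids an undefined shift by 32. */
static uint32_t make_mask(int prefixlen)
{
    return prefixlen ? htonl(~((1u << (32 - prefixlen)) - 1)) : 0;
}

int main(void)
{
    struct in_addr m = { .s_addr = make_mask(24) };
    printf("/24 -> %s\n", inet_ntoa(m));   /* 255.255.255.0 */
    return 0;
}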
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9c3ff6ba6e21..d17990ec724f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1834,7 +1834,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fi
1834 int i, s_i; 1834 int i, s_i;
1835 struct fib_alias *fa; 1835 struct fib_alias *fa;
1836 1836
1837 u32 xkey = htonl(key); 1837 __be32 xkey = htonl(key);
1838 1838
1839 s_i = cb->args[4]; 1839 s_i = cb->args[4];
1840 i = 0; 1840 i = 0;
@@ -2281,7 +2281,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2281 2281
2282 if (IS_TNODE(n)) { 2282 if (IS_TNODE(n)) {
2283 struct tnode *tn = (struct tnode *) n; 2283 struct tnode *tn = (struct tnode *) n;
2284 t_key prf = ntohl(MASK_PFX(tn->key, tn->pos)); 2284 __be32 prf = htonl(MASK_PFX(tn->key, tn->pos));
2285 2285
2286 if (!NODE_PARENT(n)) { 2286 if (!NODE_PARENT(n)) {
2287 if (iter->trie == trie_local) 2287 if (iter->trie == trie_local)
@@ -2297,7 +2297,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2297 } else { 2297 } else {
2298 struct leaf *l = (struct leaf *) n; 2298 struct leaf *l = (struct leaf *) n;
2299 int i; 2299 int i;
2300 u32 val = ntohl(l->key); 2300 __be32 val = htonl(l->key);
2301 2301
2302 seq_indent(seq, iter->depth); 2302 seq_indent(seq, iter->depth);
2303 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val)); 2303 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
@@ -2360,7 +2360,7 @@ static struct file_operations fib_trie_fops = {
2360 .release = seq_release_private, 2360 .release = seq_release_private,
2361}; 2361};
2362 2362
2363static unsigned fib_flag_trans(int type, u32 mask, const struct fib_info *fi) 2363static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2364{ 2364{
2365 static unsigned type2flags[RTN_MAX + 1] = { 2365 static unsigned type2flags[RTN_MAX + 1] = {
2366 [7] = RTF_REJECT, [8] = RTF_REJECT, 2366 [7] = RTF_REJECT, [8] = RTF_REJECT,
@@ -2369,7 +2369,7 @@ static unsigned fib_flag_trans(int type, u32 mask, const struct fib_info *fi)
2369 2369
2370 if (fi && fi->fib_nh->nh_gw) 2370 if (fi && fi->fib_nh->nh_gw)
2371 flags |= RTF_GATEWAY; 2371 flags |= RTF_GATEWAY;
2372 if (mask == 0xFFFFFFFF) 2372 if (mask == htonl(0xFFFFFFFF))
2373 flags |= RTF_HOST; 2373 flags |= RTF_HOST;
2374 flags |= RTF_UP; 2374 flags |= RTF_UP;
2375 return flags; 2375 return flags;
@@ -2403,7 +2403,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2403 for (i=32; i>=0; i--) { 2403 for (i=32; i>=0; i--) {
2404 struct leaf_info *li = find_leaf_info(l, i); 2404 struct leaf_info *li = find_leaf_info(l, i);
2405 struct fib_alias *fa; 2405 struct fib_alias *fa;
2406 u32 mask, prefix; 2406 __be32 mask, prefix;
2407 2407
2408 if (!li) 2408 if (!li)
2409 continue; 2409 continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c2ad07e48ab4..b39a37a47545 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -104,7 +104,7 @@ struct icmp_bxm {
104 104
105 struct { 105 struct {
106 struct icmphdr icmph; 106 struct icmphdr icmph;
107 __u32 times[3]; 107 __be32 times[3];
108 } data; 108 } data;
109 int head_len; 109 int head_len;
110 struct ip_options replyopts; 110 struct ip_options replyopts;
@@ -381,7 +381,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
381 struct inet_sock *inet = inet_sk(sk); 381 struct inet_sock *inet = inet_sk(sk);
382 struct ipcm_cookie ipc; 382 struct ipcm_cookie ipc;
383 struct rtable *rt = (struct rtable *)skb->dst; 383 struct rtable *rt = (struct rtable *)skb->dst;
384 u32 daddr; 384 __be32 daddr;
385 385
386 if (ip_options_echo(&icmp_param->replyopts, skb)) 386 if (ip_options_echo(&icmp_param->replyopts, skb))
387 return; 387 return;
@@ -430,14 +430,14 @@ out_unlock:
430 * MUST reply to only the first fragment. 430 * MUST reply to only the first fragment.
431 */ 431 */
432 432
433void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) 433void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
434{ 434{
435 struct iphdr *iph; 435 struct iphdr *iph;
436 int room; 436 int room;
437 struct icmp_bxm icmp_param; 437 struct icmp_bxm icmp_param;
438 struct rtable *rt = (struct rtable *)skb_in->dst; 438 struct rtable *rt = (struct rtable *)skb_in->dst;
439 struct ipcm_cookie ipc; 439 struct ipcm_cookie ipc;
440 u32 saddr; 440 __be32 saddr;
441 u8 tos; 441 u8 tos;
442 442
443 if (!rt) 443 if (!rt)
@@ -895,7 +895,7 @@ static void icmp_address_reply(struct sk_buff *skb)
895 if (in_dev->ifa_list && 895 if (in_dev->ifa_list &&
896 IN_DEV_LOG_MARTIANS(in_dev) && 896 IN_DEV_LOG_MARTIANS(in_dev) &&
897 IN_DEV_FORWARD(in_dev)) { 897 IN_DEV_FORWARD(in_dev)) {
898 u32 _mask, *mp; 898 __be32 _mask, *mp;
899 899
900 mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); 900 mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
901 BUG_ON(mp == NULL); 901 BUG_ON(mp == NULL);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 58be8227b0cb..6eee71647b7c 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -138,14 +138,14 @@
138 time_before(jiffies, (in_dev)->mr_v2_seen))) 138 time_before(jiffies, (in_dev)->mr_v2_seen)))
139 139
140static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); 140static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
141static void igmpv3_del_delrec(struct in_device *in_dev, __u32 multiaddr); 141static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
142static void igmpv3_clear_delrec(struct in_device *in_dev); 142static void igmpv3_clear_delrec(struct in_device *in_dev);
143static int sf_setstate(struct ip_mc_list *pmc); 143static int sf_setstate(struct ip_mc_list *pmc);
144static void sf_markstate(struct ip_mc_list *pmc); 144static void sf_markstate(struct ip_mc_list *pmc);
145#endif 145#endif
146static void ip_mc_clear_src(struct ip_mc_list *pmc); 146static void ip_mc_clear_src(struct ip_mc_list *pmc);
147static int ip_mc_add_src(struct in_device *in_dev, __u32 *pmca, int sfmode, 147static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
148 int sfcount, __u32 *psfsrc, int delta); 148 int sfcount, __be32 *psfsrc, int delta);
149 149
150static void ip_ma_put(struct ip_mc_list *im) 150static void ip_ma_put(struct ip_mc_list *im)
151{ 151{
@@ -426,7 +426,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
426 first = 1; 426 first = 1;
427 psf_prev = NULL; 427 psf_prev = NULL;
428 for (psf=*psf_list; psf; psf=psf_next) { 428 for (psf=*psf_list; psf; psf=psf_next) {
429 u32 *psrc; 429 __be32 *psrc;
430 430
431 psf_next = psf->sf_next; 431 psf_next = psf->sf_next;
432 432
@@ -439,7 +439,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
439 if (isquery) 439 if (isquery)
440 psf->sf_gsresp = 0; 440 psf->sf_gsresp = 0;
441 441
442 if (AVAILABLE(skb) < sizeof(u32) + 442 if (AVAILABLE(skb) < sizeof(__be32) +
443 first*sizeof(struct igmpv3_grec)) { 443 first*sizeof(struct igmpv3_grec)) {
444 if (truncate && !first) 444 if (truncate && !first)
445 break; /* truncate these */ 445 break; /* truncate these */
@@ -455,7 +455,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
455 skb = add_grhead(skb, pmc, type, &pgr); 455 skb = add_grhead(skb, pmc, type, &pgr);
456 first = 0; 456 first = 0;
457 } 457 }
458 psrc = (u32 *)skb_put(skb, sizeof(u32)); 458 psrc = (__be32 *)skb_put(skb, sizeof(__be32));
459 *psrc = psf->sf_inaddr; 459 *psrc = psf->sf_inaddr;
460 scount++; stotal++; 460 scount++; stotal++;
461 if ((type == IGMPV3_ALLOW_NEW_SOURCES || 461 if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
@@ -630,8 +630,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
630 struct igmphdr *ih; 630 struct igmphdr *ih;
631 struct rtable *rt; 631 struct rtable *rt;
632 struct net_device *dev = in_dev->dev; 632 struct net_device *dev = in_dev->dev;
633 u32 group = pmc ? pmc->multiaddr : 0; 633 __be32 group = pmc ? pmc->multiaddr : 0;
634 u32 dst; 634 __be32 dst;
635 635
636 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) 636 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
637 return igmpv3_send_report(in_dev, pmc); 637 return igmpv3_send_report(in_dev, pmc);
@@ -748,7 +748,7 @@ static void igmp_timer_expire(unsigned long data)
748} 748}
749 749
750/* mark EXCLUDE-mode sources */ 750/* mark EXCLUDE-mode sources */
751static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs) 751static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
752{ 752{
753 struct ip_sf_list *psf; 753 struct ip_sf_list *psf;
754 int i, scount; 754 int i, scount;
@@ -775,7 +775,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
775 return 1; 775 return 1;
776} 776}
777 777
778static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs) 778static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
779{ 779{
780 struct ip_sf_list *psf; 780 struct ip_sf_list *psf;
781 int i, scount; 781 int i, scount;
@@ -803,7 +803,7 @@ static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
803 return 1; 803 return 1;
804} 804}
805 805
806static void igmp_heard_report(struct in_device *in_dev, u32 group) 806static void igmp_heard_report(struct in_device *in_dev, __be32 group)
807{ 807{
808 struct ip_mc_list *im; 808 struct ip_mc_list *im;
809 809
@@ -828,7 +828,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
828 struct igmphdr *ih = skb->h.igmph; 828 struct igmphdr *ih = skb->h.igmph;
829 struct igmpv3_query *ih3 = (struct igmpv3_query *)ih; 829 struct igmpv3_query *ih3 = (struct igmpv3_query *)ih;
830 struct ip_mc_list *im; 830 struct ip_mc_list *im;
831 u32 group = ih->group; 831 __be32 group = ih->group;
832 int max_delay; 832 int max_delay;
833 int mark = 0; 833 int mark = 0;
834 834
@@ -862,7 +862,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
862 ih3 = (struct igmpv3_query *) skb->h.raw; 862 ih3 = (struct igmpv3_query *) skb->h.raw;
863 if (ih3->nsrcs) { 863 if (ih3->nsrcs) {
864 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) 864 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
865 + ntohs(ih3->nsrcs)*sizeof(__u32))) 865 + ntohs(ih3->nsrcs)*sizeof(__be32)))
866 return; 866 return;
867 ih3 = (struct igmpv3_query *) skb->h.raw; 867 ih3 = (struct igmpv3_query *) skb->h.raw;
868 } 868 }
@@ -985,7 +985,7 @@ drop:
985 * Add a filter to a device 985 * Add a filter to a device
986 */ 986 */
987 987
988static void ip_mc_filter_add(struct in_device *in_dev, u32 addr) 988static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
989{ 989{
990 char buf[MAX_ADDR_LEN]; 990 char buf[MAX_ADDR_LEN];
991 struct net_device *dev = in_dev->dev; 991 struct net_device *dev = in_dev->dev;
@@ -1005,7 +1005,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, u32 addr)
1005 * Remove a filter from a device 1005 * Remove a filter from a device
1006 */ 1006 */
1007 1007
1008static void ip_mc_filter_del(struct in_device *in_dev, u32 addr) 1008static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
1009{ 1009{
1010 char buf[MAX_ADDR_LEN]; 1010 char buf[MAX_ADDR_LEN];
1011 struct net_device *dev = in_dev->dev; 1011 struct net_device *dev = in_dev->dev;
@@ -1055,7 +1055,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1055 spin_unlock_bh(&in_dev->mc_tomb_lock); 1055 spin_unlock_bh(&in_dev->mc_tomb_lock);
1056} 1056}
1057 1057
1058static void igmpv3_del_delrec(struct in_device *in_dev, __u32 multiaddr) 1058static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
1059{ 1059{
1060 struct ip_mc_list *pmc, *pmc_prev; 1060 struct ip_mc_list *pmc, *pmc_prev;
1061 struct ip_sf_list *psf, *psf_next; 1061 struct ip_sf_list *psf, *psf_next;
@@ -1193,7 +1193,7 @@ static void igmp_group_added(struct ip_mc_list *im)
1193 * A socket has joined a multicast group on device dev. 1193 * A socket has joined a multicast group on device dev.
1194 */ 1194 */
1195 1195
1196void ip_mc_inc_group(struct in_device *in_dev, u32 addr) 1196void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1197{ 1197{
1198 struct ip_mc_list *im; 1198 struct ip_mc_list *im;
1199 1199
@@ -1252,7 +1252,7 @@ out:
1252 * A socket has left a multicast group on device dev 1252 * A socket has left a multicast group on device dev
1253 */ 1253 */
1254 1254
1255void ip_mc_dec_group(struct in_device *in_dev, u32 addr) 1255void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1256{ 1256{
1257 struct ip_mc_list *i, **ip; 1257 struct ip_mc_list *i, **ip;
1258 1258
@@ -1402,7 +1402,7 @@ int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
1402 1402
1403 1403
1404static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, 1404static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
1405 __u32 *psfsrc) 1405 __be32 *psfsrc)
1406{ 1406{
1407 struct ip_sf_list *psf, *psf_prev; 1407 struct ip_sf_list *psf, *psf_prev;
1408 int rv = 0; 1408 int rv = 0;
@@ -1450,8 +1450,8 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
1450#define igmp_ifc_event(x) do { } while (0) 1450#define igmp_ifc_event(x) do { } while (0)
1451#endif 1451#endif
1452 1452
1453static int ip_mc_del_src(struct in_device *in_dev, __u32 *pmca, int sfmode, 1453static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1454 int sfcount, __u32 *psfsrc, int delta) 1454 int sfcount, __be32 *psfsrc, int delta)
1455{ 1455{
1456 struct ip_mc_list *pmc; 1456 struct ip_mc_list *pmc;
1457 int changerec = 0; 1457 int changerec = 0;
@@ -1517,7 +1517,7 @@ out_unlock:
1517 * Add multicast single-source filter to the interface list 1517 * Add multicast single-source filter to the interface list
1518 */ 1518 */
1519static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, 1519static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
1520 __u32 *psfsrc, int delta) 1520 __be32 *psfsrc, int delta)
1521{ 1521{
1522 struct ip_sf_list *psf, *psf_prev; 1522 struct ip_sf_list *psf, *psf_prev;
1523 1523
@@ -1623,8 +1623,8 @@ static int sf_setstate(struct ip_mc_list *pmc)
1623/* 1623/*
1624 * Add multicast source filter list to the interface list 1624 * Add multicast source filter list to the interface list
1625 */ 1625 */
1626static int ip_mc_add_src(struct in_device *in_dev, __u32 *pmca, int sfmode, 1626static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1627 int sfcount, __u32 *psfsrc, int delta) 1627 int sfcount, __be32 *psfsrc, int delta)
1628{ 1628{
1629 struct ip_mc_list *pmc; 1629 struct ip_mc_list *pmc;
1630 int isexclude; 1630 int isexclude;
@@ -1717,7 +1717,7 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
1717int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) 1717int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1718{ 1718{
1719 int err; 1719 int err;
1720 u32 addr = imr->imr_multiaddr.s_addr; 1720 __be32 addr = imr->imr_multiaddr.s_addr;
1721 struct ip_mc_socklist *iml=NULL, *i; 1721 struct ip_mc_socklist *iml=NULL, *i;
1722 struct in_device *in_dev; 1722 struct in_device *in_dev;
1723 struct inet_sock *inet = inet_sk(sk); 1723 struct inet_sock *inet = inet_sk(sk);
@@ -1791,7 +1791,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1791 struct inet_sock *inet = inet_sk(sk); 1791 struct inet_sock *inet = inet_sk(sk);
1792 struct ip_mc_socklist *iml, **imlp; 1792 struct ip_mc_socklist *iml, **imlp;
1793 struct in_device *in_dev; 1793 struct in_device *in_dev;
1794 u32 group = imr->imr_multiaddr.s_addr; 1794 __be32 group = imr->imr_multiaddr.s_addr;
1795 u32 ifindex; 1795 u32 ifindex;
1796 int ret = -EADDRNOTAVAIL; 1796 int ret = -EADDRNOTAVAIL;
1797 1797
@@ -1829,7 +1829,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1829{ 1829{
1830 int err; 1830 int err;
1831 struct ip_mreqn imr; 1831 struct ip_mreqn imr;
1832 u32 addr = mreqs->imr_multiaddr; 1832 __be32 addr = mreqs->imr_multiaddr;
1833 struct ip_mc_socklist *pmc; 1833 struct ip_mc_socklist *pmc;
1834 struct in_device *in_dev = NULL; 1834 struct in_device *in_dev = NULL;
1835 struct inet_sock *inet = inet_sk(sk); 1835 struct inet_sock *inet = inet_sk(sk);
@@ -1883,7 +1883,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1883 rv = !0; 1883 rv = !0;
1884 for (i=0; i<psl->sl_count; i++) { 1884 for (i=0; i<psl->sl_count; i++) {
1885 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr, 1885 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
1886 sizeof(__u32)); 1886 sizeof(__be32));
1887 if (rv == 0) 1887 if (rv == 0)
1888 break; 1888 break;
1889 } 1889 }
@@ -1935,7 +1935,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1935 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 1935 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1936 for (i=0; i<psl->sl_count; i++) { 1936 for (i=0; i<psl->sl_count; i++) {
1937 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr, 1937 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
1938 sizeof(__u32)); 1938 sizeof(__be32));
1939 if (rv == 0) 1939 if (rv == 0)
1940 break; 1940 break;
1941 } 1941 }
@@ -1960,7 +1960,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
1960{ 1960{
1961 int err = 0; 1961 int err = 0;
1962 struct ip_mreqn imr; 1962 struct ip_mreqn imr;
1963 u32 addr = msf->imsf_multiaddr; 1963 __be32 addr = msf->imsf_multiaddr;
1964 struct ip_mc_socklist *pmc; 1964 struct ip_mc_socklist *pmc;
1965 struct in_device *in_dev; 1965 struct in_device *in_dev;
1966 struct inet_sock *inet = inet_sk(sk); 1966 struct inet_sock *inet = inet_sk(sk);
@@ -2044,7 +2044,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2044{ 2044{
2045 int err, len, count, copycount; 2045 int err, len, count, copycount;
2046 struct ip_mreqn imr; 2046 struct ip_mreqn imr;
2047 u32 addr = msf->imsf_multiaddr; 2047 __be32 addr = msf->imsf_multiaddr;
2048 struct ip_mc_socklist *pmc; 2048 struct ip_mc_socklist *pmc;
2049 struct in_device *in_dev; 2049 struct in_device *in_dev;
2050 struct inet_sock *inet = inet_sk(sk); 2050 struct inet_sock *inet = inet_sk(sk);
@@ -2103,7 +2103,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2103{ 2103{
2104 int err, i, count, copycount; 2104 int err, i, count, copycount;
2105 struct sockaddr_in *psin; 2105 struct sockaddr_in *psin;
2106 u32 addr; 2106 __be32 addr;
2107 struct ip_mc_socklist *pmc; 2107 struct ip_mc_socklist *pmc;
2108 struct inet_sock *inet = inet_sk(sk); 2108 struct inet_sock *inet = inet_sk(sk);
2109 struct ip_sf_socklist *psl; 2109 struct ip_sf_socklist *psl;
@@ -2156,7 +2156,7 @@ done:
2156/* 2156/*
2157 * check if a multicast source filter allows delivery for a given <src,dst,intf> 2157 * check if a multicast source filter allows delivery for a given <src,dst,intf>
2158 */ 2158 */
2159int ip_mc_sf_allow(struct sock *sk, u32 loc_addr, u32 rmt_addr, int dif) 2159int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2160{ 2160{
2161 struct inet_sock *inet = inet_sk(sk); 2161 struct inet_sock *inet = inet_sk(sk);
2162 struct ip_mc_socklist *pmc; 2162 struct ip_mc_socklist *pmc;
@@ -2216,7 +2216,7 @@ void ip_mc_drop_socket(struct sock *sk)
2216 rtnl_unlock(); 2216 rtnl_unlock();
2217} 2217}
2218 2218
2219int ip_check_mc(struct in_device *in_dev, u32 mc_addr, u32 src_addr, u16 proto) 2219int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
2220{ 2220{
2221 struct ip_mc_list *im; 2221 struct ip_mc_list *im;
2222 struct ip_sf_list *psf; 2222 struct ip_sf_list *psf;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 07204391d083..96bbe2a0aa1b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -39,7 +39,7 @@ int sysctl_local_port_range[2] = { 1024, 4999 };
39int inet_csk_bind_conflict(const struct sock *sk, 39int inet_csk_bind_conflict(const struct sock *sk,
40 const struct inet_bind_bucket *tb) 40 const struct inet_bind_bucket *tb)
41{ 41{
42 const u32 sk_rcv_saddr = inet_rcv_saddr(sk); 42 const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
43 struct sock *sk2; 43 struct sock *sk2;
44 struct hlist_node *node; 44 struct hlist_node *node;
45 int reuse = sk->sk_reuse; 45 int reuse = sk->sk_reuse;
@@ -52,7 +52,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
52 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 52 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
53 if (!reuse || !sk2->sk_reuse || 53 if (!reuse || !sk2->sk_reuse ||
54 sk2->sk_state == TCP_LISTEN) { 54 sk2->sk_state == TCP_LISTEN) {
55 const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 55 const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
56 if (!sk2_rcv_saddr || !sk_rcv_saddr || 56 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
57 sk2_rcv_saddr == sk_rcv_saddr) 57 sk2_rcv_saddr == sk_rcv_saddr)
58 break; 58 break;
@@ -342,10 +342,10 @@ struct dst_entry* inet_csk_route_req(struct sock *sk,
342 342
343EXPORT_SYMBOL_GPL(inet_csk_route_req); 343EXPORT_SYMBOL_GPL(inet_csk_route_req);
344 344
345static inline u32 inet_synq_hash(const u32 raddr, const u16 rport, 345static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
346 const u32 rnd, const u16 synq_hsize) 346 const u32 rnd, const u16 synq_hsize)
347{ 347{
348 return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1); 348 return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
349} 349}
350 350
351#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 351#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -356,8 +356,8 @@ static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
356 356
357struct request_sock *inet_csk_search_req(const struct sock *sk, 357struct request_sock *inet_csk_search_req(const struct sock *sk,
358 struct request_sock ***prevp, 358 struct request_sock ***prevp,
359 const __u16 rport, const __u32 raddr, 359 const __be16 rport, const __be32 raddr,
360 const __u32 laddr) 360 const __be32 laddr)
361{ 361{
362 const struct inet_connection_sock *icsk = inet_csk(sk); 362 const struct inet_connection_sock *icsk = inet_csk(sk);
363 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 363 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
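
The inet_synq_hash() hunk above feeds __be32/__be16 values to jhash_2words() through (__force u32) casts; the casts only strip the endianness annotation for sparse and do not change the bits that are hashed. The userspace sketch below is only an illustration of the bucket-index idea, not part of the commit: the mix() function is a stand-in mixer, not the kernel's jhash, and SYNQ_HSIZE is an assumed power-of-two table size.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define SYNQ_HSIZE 512u   /* assumed power of two, so masking works */

/* Stand-in mixer; the kernel uses jhash_2words() here. */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t rnd)
{
    uint32_t h = a ^ (b * 0x9e3779b1u) ^ rnd;
    h ^= h >> 16;
    h *= 0x85ebca6bu;
    h ^= h >> 13;
    return h;
}

/* The address/port arrive in network byte order; the hash treats them as
 * opaque 32-bit and 16-bit values, which is why the kernel casts are safe. */
static uint32_t synq_hash(uint32_t raddr_be, uint16_t rport_be, uint32_t rnd)
{
    return mix(raddr_be, rport_be, rnd) & (SYNQ_HSIZE - 1);
}

int main(void)
{
    uint32_t peer = htonl(0xc0a80001u);   /* 192.168.0.1 */
    printf("bucket = %u\n", synq_hash(peer, htons(443), 0x12345678u));
    return 0;
}
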
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 492858e6faf0..77761ac4f7bb 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -36,8 +36,8 @@
36static const struct inet_diag_handler **inet_diag_table; 36static const struct inet_diag_handler **inet_diag_table;
37 37
38struct inet_diag_entry { 38struct inet_diag_entry {
39 u32 *saddr; 39 __be32 *saddr;
40 u32 *daddr; 40 __be32 *daddr;
41 u16 sport; 41 u16 sport;
42 u16 dport; 42 u16 dport;
43 u16 family; 43 u16 family;
@@ -294,7 +294,7 @@ out:
294 return err; 294 return err;
295} 295}
296 296
297static int bitstring_match(const u32 *a1, const u32 *a2, int bits) 297static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
298{ 298{
299 int words = bits >> 5; 299 int words = bits >> 5;
300 300
@@ -305,8 +305,8 @@ static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
305 return 0; 305 return 0;
306 } 306 }
307 if (bits) { 307 if (bits) {
308 __u32 w1, w2; 308 __be32 w1, w2;
309 __u32 mask; 309 __be32 mask;
310 310
311 w1 = a1[words]; 311 w1 = a1[words];
312 w2 = a2[words]; 312 w2 = a2[words];
@@ -352,7 +352,7 @@ static int inet_diag_bc_run(const void *bc, int len,
352 case INET_DIAG_BC_S_COND: 352 case INET_DIAG_BC_S_COND:
353 case INET_DIAG_BC_D_COND: { 353 case INET_DIAG_BC_D_COND: {
354 struct inet_diag_hostcond *cond; 354 struct inet_diag_hostcond *cond;
355 u32 *addr; 355 __be32 *addr;
356 356
357 cond = (struct inet_diag_hostcond *)(op + 1); 357 cond = (struct inet_diag_hostcond *)(op + 1);
358 if (cond->port != -1 && 358 if (cond->port != -1 &&
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index fb296c9a7f3f..244c4f445c7d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -125,7 +125,7 @@ EXPORT_SYMBOL(inet_listen_wlock);
125 * wildcarded during the search since they can never be otherwise. 125 * wildcarded during the search since they can never be otherwise.
126 */ 126 */
127static struct sock *inet_lookup_listener_slow(const struct hlist_head *head, 127static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
128 const u32 daddr, 128 const __be32 daddr,
129 const unsigned short hnum, 129 const unsigned short hnum,
130 const int dif) 130 const int dif)
131{ 131{
@@ -137,7 +137,7 @@ static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
137 const struct inet_sock *inet = inet_sk(sk); 137 const struct inet_sock *inet = inet_sk(sk);
138 138
139 if (inet->num == hnum && !ipv6_only_sock(sk)) { 139 if (inet->num == hnum && !ipv6_only_sock(sk)) {
140 const __u32 rcv_saddr = inet->rcv_saddr; 140 const __be32 rcv_saddr = inet->rcv_saddr;
141 int score = sk->sk_family == PF_INET ? 1 : 0; 141 int score = sk->sk_family == PF_INET ? 1 : 0;
142 142
143 if (rcv_saddr) { 143 if (rcv_saddr) {
@@ -163,7 +163,7 @@ static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
163 163
164/* Optimize the common listener case. */ 164/* Optimize the common listener case. */
165struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo, 165struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
166 const u32 daddr, const unsigned short hnum, 166 const __be32 daddr, const unsigned short hnum,
167 const int dif) 167 const int dif)
168{ 168{
169 struct sock *sk = NULL; 169 struct sock *sk = NULL;
@@ -197,11 +197,11 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
197{ 197{
198 struct inet_hashinfo *hinfo = death_row->hashinfo; 198 struct inet_hashinfo *hinfo = death_row->hashinfo;
199 struct inet_sock *inet = inet_sk(sk); 199 struct inet_sock *inet = inet_sk(sk);
200 u32 daddr = inet->rcv_saddr; 200 __be32 daddr = inet->rcv_saddr;
201 u32 saddr = inet->daddr; 201 __be32 saddr = inet->daddr;
202 int dif = sk->sk_bound_dev_if; 202 int dif = sk->sk_bound_dev_if;
203 INET_ADDR_COOKIE(acookie, saddr, daddr) 203 INET_ADDR_COOKIE(acookie, saddr, daddr)
204 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 204 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
205 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); 205 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
206 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 206 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
207 struct sock *sk2; 207 struct sock *sk2;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index a675602ef295..f072f3875af8 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ; /* TTL under high load: 120 sec */
94int inet_peer_maxttl = 10 * 60 * HZ; /* usual time to live: 10 min */ 94int inet_peer_maxttl = 10 * 60 * HZ; /* usual time to live: 10 min */
95 95
96static struct inet_peer *inet_peer_unused_head; 96static struct inet_peer *inet_peer_unused_head;
97/* Exported for inet_putpeer inline function. */ 97static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
98struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head; 98static DEFINE_SPINLOCK(inet_peer_unused_lock);
99DEFINE_SPINLOCK(inet_peer_unused_lock);
100#define PEER_MAX_CLEANUP_WORK 30
101 99
102static void peer_check_expire(unsigned long dummy); 100static void peer_check_expire(unsigned long dummy);
103static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0); 101static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -163,7 +161,7 @@ static void unlink_from_unused(struct inet_peer *p)
163 for (u = peer_root; u != peer_avl_empty; ) { \ 161 for (u = peer_root; u != peer_avl_empty; ) { \
164 if (daddr == u->v4daddr) \ 162 if (daddr == u->v4daddr) \
165 break; \ 163 break; \
166 if (daddr < u->v4daddr) \ 164 if ((__force __u32)daddr < (__force __u32)u->v4daddr) \
167 v = &u->avl_left; \ 165 v = &u->avl_left; \
168 else \ 166 else \
169 v = &u->avl_right; \ 167 v = &u->avl_right; \
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
340 spin_lock_bh(&inet_peer_unused_lock); 338 spin_lock_bh(&inet_peer_unused_lock);
341 p = inet_peer_unused_head; 339 p = inet_peer_unused_head;
342 if (p != NULL) { 340 if (p != NULL) {
343 if (time_after(p->dtime + ttl, jiffies)) { 341 __u32 delta = (__u32)jiffies - p->dtime;
342 if (delta < ttl) {
344 /* Do not prune fresh entries. */ 343 /* Do not prune fresh entries. */
345 spin_unlock_bh(&inet_peer_unused_lock); 344 spin_unlock_bh(&inet_peer_unused_lock);
346 return -1; 345 return -1;
@@ -368,7 +367,7 @@ static int cleanup_once(unsigned long ttl)
368} 367}
369 368
370/* Called with or without local BH being disabled. */ 369/* Called with or without local BH being disabled. */
371struct inet_peer *inet_getpeer(__u32 daddr, int create) 370struct inet_peer *inet_getpeer(__be32 daddr, int create)
372{ 371{
373 struct inet_peer *p, *n; 372 struct inet_peer *p, *n;
374 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; 373 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
@@ -432,7 +431,7 @@ out_free:
432/* Called with local BH disabled. */ 431/* Called with local BH disabled. */
433static void peer_check_expire(unsigned long dummy) 432static void peer_check_expire(unsigned long dummy)
434{ 433{
435 int i; 434 unsigned long now = jiffies;
436 int ttl; 435 int ttl;
437 436
438 if (peer_total >= inet_peer_threshold) 437 if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
441 ttl = inet_peer_maxttl 440 ttl = inet_peer_maxttl
442 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 441 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
443 peer_total / inet_peer_threshold * HZ; 442 peer_total / inet_peer_threshold * HZ;
444 for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++); 443 while (!cleanup_once(ttl)) {
444 if (jiffies != now)
445 break;
446 }
445 447
446 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime 448 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
447 * interval depending on the total number of entries (more entries, 449 * interval depending on the total number of entries (more entries,
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
455 peer_total / inet_peer_threshold * HZ; 457 peer_total / inet_peer_threshold * HZ;
456 add_timer(&peer_periodic_timer); 458 add_timer(&peer_periodic_timer);
457} 459}
460
461void inet_putpeer(struct inet_peer *p)
462{
463 spin_lock_bh(&inet_peer_unused_lock);
464 if (atomic_dec_and_test(&p->refcnt)) {
465 p->unused_prevp = inet_peer_unused_tailp;
466 p->unused_next = NULL;
467 *inet_peer_unused_tailp = p;
468 inet_peer_unused_tailp = &p->unused_next;
469 p->dtime = (__u32)jiffies;
470 }
471 spin_unlock_bh(&inet_peer_unused_lock);
472}
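
The cleanup_once() hunk above ages entries by unsigned subtraction, (__u32)jiffies - p->dtime, which stays correct when the tick counter wraps, unlike comparing jiffies against dtime + ttl directly. A minimal userspace sketch of the idiom (not from the commit; a plain uint32_t counter plays the role of jiffies):

#include <stdint.h>
#include <stdio.h>

/* Age check that is well-defined modulo 2^32, so it survives counter wrap. */
static int entry_is_fresh(uint32_t now, uint32_t dtime, uint32_t ttl)
{
    uint32_t age = now - dtime;
    return age < ttl;
}

int main(void)
{
    /* dtime was stamped just before the 32-bit counter wrapped;
     * "now" is 0x20 ticks later, on the other side of the wrap. */
    uint32_t dtime = 0xFFFFFFF0u;
    uint32_t now   = 0x00000010u;

    printf("fresh with ttl=0x40? %d\n", entry_is_fresh(now, dtime, 0x40));
    printf("fresh with ttl=0x10? %d\n", entry_is_fresh(now, dtime, 0x10));
    return 0;
}
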
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 165d72859ddf..74046efdf875 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -77,9 +77,9 @@ struct ipq {
77 struct hlist_node list; 77 struct hlist_node list;
78 struct list_head lru_list; /* lru list member */ 78 struct list_head lru_list; /* lru list member */
79 u32 user; 79 u32 user;
80 u32 saddr; 80 __be32 saddr;
81 u32 daddr; 81 __be32 daddr;
82 u16 id; 82 __be16 id;
83 u8 protocol; 83 u8 protocol;
84 u8 last_in; 84 u8 last_in;
85#define COMPLETE 4 85#define COMPLETE 4
@@ -123,9 +123,10 @@ static __inline__ void ipq_unlink(struct ipq *ipq)
123 write_unlock(&ipfrag_lock); 123 write_unlock(&ipfrag_lock);
124} 124}
125 125
126static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot) 126static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
127{ 127{
128 return jhash_3words((u32)id << 16 | prot, saddr, daddr, 128 return jhash_3words((__force u32)id << 16 | prot,
129 (__force u32)saddr, (__force u32)daddr,
129 ipfrag_hash_rnd) & (IPQ_HASHSZ - 1); 130 ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
130} 131}
131 132
@@ -387,8 +388,8 @@ out_nomem:
387static inline struct ipq *ip_find(struct iphdr *iph, u32 user) 388static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
388{ 389{
389 __be16 id = iph->id; 390 __be16 id = iph->id;
390 __u32 saddr = iph->saddr; 391 __be32 saddr = iph->saddr;
391 __u32 daddr = iph->daddr; 392 __be32 daddr = iph->daddr;
392 __u8 protocol = iph->protocol; 393 __u8 protocol = iph->protocol;
393 unsigned int hash; 394 unsigned int hash;
394 struct ipq *qp; 395 struct ipq *qp;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f5fba051df3d..d5b5dec075b8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -611,8 +611,8 @@ static int ipgre_rcv(struct sk_buff *skb)
611 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 611 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
612 */ 612 */
613 if (flags == 0 && 613 if (flags == 0 &&
614 skb->protocol == __constant_htons(ETH_P_WCCP)) { 614 skb->protocol == htons(ETH_P_WCCP)) {
615 skb->protocol = __constant_htons(ETH_P_IP); 615 skb->protocol = htons(ETH_P_IP);
616 if ((*(h + offset) & 0xF0) != 0x40) 616 if ((*(h + offset) & 0xF0) != 0x40)
617 offset += 4; 617 offset += 4;
618 } 618 }
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index e7437c091326..8dabbfc31267 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -38,7 +38,7 @@
38 */ 38 */
39 39
40void ip_options_build(struct sk_buff * skb, struct ip_options * opt, 40void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
41 u32 daddr, struct rtable *rt, int is_frag) 41 __be32 daddr, struct rtable *rt, int is_frag)
42{ 42{
43 unsigned char * iph = skb->nh.raw; 43 unsigned char * iph = skb->nh.raw;
44 44
@@ -57,7 +57,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
57 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); 57 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
58 if (opt->ts_needtime) { 58 if (opt->ts_needtime) {
59 struct timeval tv; 59 struct timeval tv;
60 __u32 midtime; 60 __be32 midtime;
61 do_gettimeofday(&tv); 61 do_gettimeofday(&tv);
62 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 62 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000);
63 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); 63 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
@@ -91,7 +91,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
91 unsigned char *sptr, *dptr; 91 unsigned char *sptr, *dptr;
92 int soffset, doffset; 92 int soffset, doffset;
93 int optlen; 93 int optlen;
94 u32 daddr; 94 __be32 daddr;
95 95
96 memset(dopt, 0, sizeof(struct ip_options)); 96 memset(dopt, 0, sizeof(struct ip_options));
97 97
@@ -148,7 +148,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
148 dopt->ts_needtime = 0; 148 dopt->ts_needtime = 0;
149 149
150 if (soffset + 8 <= optlen) { 150 if (soffset + 8 <= optlen) {
151 __u32 addr; 151 __be32 addr;
152 152
153 memcpy(&addr, sptr+soffset-1, 4); 153 memcpy(&addr, sptr+soffset-1, 4);
154 if (inet_addr_type(addr) != RTN_LOCAL) { 154 if (inet_addr_type(addr) != RTN_LOCAL) {
@@ -165,7 +165,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
165 } 165 }
166 if (sopt->srr) { 166 if (sopt->srr) {
167 unsigned char * start = sptr+sopt->srr; 167 unsigned char * start = sptr+sopt->srr;
168 u32 faddr; 168 __be32 faddr;
169 169
170 optlen = start[1]; 170 optlen = start[1];
171 soffset = start[2]; 171 soffset = start[2];
@@ -362,7 +362,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
362 goto error; 362 goto error;
363 } 363 }
364 if (optptr[2] <= optlen) { 364 if (optptr[2] <= optlen) {
365 __u32 * timeptr = NULL; 365 __be32 *timeptr = NULL;
366 if (optptr[2]+3 > optptr[1]) { 366 if (optptr[2]+3 > optptr[1]) {
367 pp_ptr = optptr + 2; 367 pp_ptr = optptr + 2;
368 goto error; 368 goto error;
@@ -371,7 +371,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
371 case IPOPT_TS_TSONLY: 371 case IPOPT_TS_TSONLY:
372 opt->ts = optptr - iph; 372 opt->ts = optptr - iph;
373 if (skb) 373 if (skb)
374 timeptr = (__u32*)&optptr[optptr[2]-1]; 374 timeptr = (__be32*)&optptr[optptr[2]-1];
375 opt->ts_needtime = 1; 375 opt->ts_needtime = 1;
376 optptr[2] += 4; 376 optptr[2] += 4;
377 break; 377 break;
@@ -383,7 +383,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
383 opt->ts = optptr - iph; 383 opt->ts = optptr - iph;
384 if (skb) { 384 if (skb) {
385 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 385 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
386 timeptr = (__u32*)&optptr[optptr[2]+3]; 386 timeptr = (__be32*)&optptr[optptr[2]+3];
387 } 387 }
388 opt->ts_needaddr = 1; 388 opt->ts_needaddr = 1;
389 opt->ts_needtime = 1; 389 opt->ts_needtime = 1;
@@ -396,12 +396,12 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
396 } 396 }
397 opt->ts = optptr - iph; 397 opt->ts = optptr - iph;
398 { 398 {
399 u32 addr; 399 __be32 addr;
400 memcpy(&addr, &optptr[optptr[2]-1], 4); 400 memcpy(&addr, &optptr[optptr[2]-1], 4);
401 if (inet_addr_type(addr) == RTN_UNICAST) 401 if (inet_addr_type(addr) == RTN_UNICAST)
402 break; 402 break;
403 if (skb) 403 if (skb)
404 timeptr = (__u32*)&optptr[optptr[2]+3]; 404 timeptr = (__be32*)&optptr[optptr[2]+3];
405 } 405 }
406 opt->ts_needtime = 1; 406 opt->ts_needtime = 1;
407 optptr[2] += 8; 407 optptr[2] += 8;
@@ -415,10 +415,10 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
415 } 415 }
416 if (timeptr) { 416 if (timeptr) {
417 struct timeval tv; 417 struct timeval tv;
418 __u32 midtime; 418 __be32 midtime;
419 do_gettimeofday(&tv); 419 do_gettimeofday(&tv);
420 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 420 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000);
421 memcpy(timeptr, &midtime, sizeof(__u32)); 421 memcpy(timeptr, &midtime, sizeof(__be32));
422 opt->is_changed = 1; 422 opt->is_changed = 1;
423 } 423 }
424 } else { 424 } else {
@@ -607,7 +607,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
607{ 607{
608 struct ip_options *opt = &(IPCB(skb)->opt); 608 struct ip_options *opt = &(IPCB(skb)->opt);
609 int srrspace, srrptr; 609 int srrspace, srrptr;
610 u32 nexthop; 610 __be32 nexthop;
611 struct iphdr *iph = skb->nh.iph; 611 struct iphdr *iph = skb->nh.iph;
612 unsigned char * optptr = skb->nh.raw + opt->srr; 612 unsigned char * optptr = skb->nh.raw + opt->srr;
613 struct rtable *rt = (struct rtable*)skb->dst; 613 struct rtable *rt = (struct rtable*)skb->dst;
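
The timestamp-option hunks above type midtime and timeptr as __be32 because the value written into the IP timestamp option is milliseconds since midnight UT in network byte order. As an illustrative userspace sketch only (not part of the commit), with gettimeofday() standing in for the kernel's do_gettimeofday() and a plain byte array standing in for the option bytes:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

int main(void)
{
    struct timeval tv;
    unsigned char opt[4];   /* the 4 timestamp bytes inside the option */
    uint32_t midtime;

    gettimeofday(&tv, NULL);
    /* Milliseconds since midnight UT, converted to network byte order. */
    midtime = htonl((uint32_t)((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000));
    /* The option field is unaligned, hence memcpy rather than a direct store. */
    memcpy(opt, &midtime, sizeof(midtime));

    printf("ms since midnight UT: %u\n", ntohl(midtime));
    return 0;
}
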
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 97aee76fb746..fc195a44fc2e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -118,7 +118,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
118 * 118 *
119 */ 119 */
120int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, 120int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
121 u32 saddr, u32 daddr, struct ip_options *opt) 121 __be32 saddr, __be32 daddr, struct ip_options *opt)
122{ 122{
123 struct inet_sock *inet = inet_sk(sk); 123 struct inet_sock *inet = inet_sk(sk);
124 struct rtable *rt = (struct rtable *)skb->dst; 124 struct rtable *rt = (struct rtable *)skb->dst;
@@ -306,7 +306,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
306 /* Make sure we can route this packet. */ 306 /* Make sure we can route this packet. */
307 rt = (struct rtable *)__sk_dst_check(sk, 0); 307 rt = (struct rtable *)__sk_dst_check(sk, 0);
308 if (rt == NULL) { 308 if (rt == NULL) {
309 u32 daddr; 309 __be32 daddr;
310 310
311 /* Use correct destination address if we have options. */ 311 /* Use correct destination address if we have options. */
312 daddr = inet->daddr; 312 daddr = inet->daddr;
@@ -1340,7 +1340,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1340 char data[40]; 1340 char data[40];
1341 } replyopts; 1341 } replyopts;
1342 struct ipcm_cookie ipc; 1342 struct ipcm_cookie ipc;
1343 u32 daddr; 1343 __be32 daddr;
1344 struct rtable *rt = (struct rtable*)skb->dst; 1344 struct rtable *rt = (struct rtable*)skb->dst;
1345 1345
1346 if (ip_options_echo(&replyopts.opt, skb)) 1346 if (ip_options_echo(&replyopts.opt, skb))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2d05c4133d3e..4b132953bcc2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -254,7 +254,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
254} 254}
255 255
256void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 256void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
257 u16 port, u32 info, u8 *payload) 257 __be16 port, u32 info, u8 *payload)
258{ 258{
259 struct inet_sock *inet = inet_sk(sk); 259 struct inet_sock *inet = inet_sk(sk);
260 struct sock_exterr_skb *serr; 260 struct sock_exterr_skb *serr;
@@ -283,7 +283,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
283 kfree_skb(skb); 283 kfree_skb(skb);
284} 284}
285 285
286void ip_local_error(struct sock *sk, int err, u32 daddr, u16 port, u32 info) 286void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
287{ 287{
288 struct inet_sock *inet = inet_sk(sk); 288 struct inet_sock *inet = inet_sk(sk);
289 struct sock_exterr_skb *serr; 289 struct sock_exterr_skb *serr;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 17342430a843..3839b706142e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -183,7 +183,7 @@ out_ok:
183 183
184static void ipcomp4_err(struct sk_buff *skb, u32 info) 184static void ipcomp4_err(struct sk_buff *skb, u32 info)
185{ 185{
186 u32 spi; 186 __be32 spi;
187 struct iphdr *iph = (struct iphdr *)skb->data; 187 struct iphdr *iph = (struct iphdr *)skb->data;
188 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 188 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
189 struct xfrm_state *x; 189 struct xfrm_state *x;
@@ -206,6 +206,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
207{ 207{
208 struct xfrm_state *t; 208 struct xfrm_state *t;
209 u8 mode = XFRM_MODE_TUNNEL;
209 210
210 t = xfrm_state_alloc(); 211 t = xfrm_state_alloc();
211 if (t == NULL) 212 if (t == NULL)
@@ -216,7 +217,9 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
216 t->id.daddr.a4 = x->id.daddr.a4; 217 t->id.daddr.a4 = x->id.daddr.a4;
217 memcpy(&t->sel, &x->sel, sizeof(t->sel)); 218 memcpy(&t->sel, &x->sel, sizeof(t->sel));
218 t->props.family = AF_INET; 219 t->props.family = AF_INET;
219 t->props.mode = XFRM_MODE_TUNNEL; 220 if (x->props.mode == XFRM_MODE_BEET)
221 mode = x->props.mode;
222 t->props.mode = mode;
220 t->props.saddr.a4 = x->props.saddr.a4; 223 t->props.saddr.a4 = x->props.saddr.a4;
221 t->props.flags = x->props.flags; 224 t->props.flags = x->props.flags;
222 225
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 1fbb38415b19..f8ce84759159 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -366,7 +366,7 @@ static int __init ic_defaults(void)
366 */ 366 */
367 367
368 if (!ic_host_name_set) 368 if (!ic_host_name_set)
369 sprintf(system_utsname.nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); 369 sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr));
370 370
371 if (root_server_addr == INADDR_NONE) 371 if (root_server_addr == INADDR_NONE)
372 root_server_addr = ic_servaddr; 372 root_server_addr = ic_servaddr;
@@ -805,7 +805,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
805 } 805 }
806 break; 806 break;
807 case 12: /* Host name */ 807 case 12: /* Host name */
808 ic_bootp_string(system_utsname.nodename, ext+1, *ext, __NEW_UTS_LEN); 808 ic_bootp_string(utsname()->nodename, ext+1, *ext, __NEW_UTS_LEN);
809 ic_host_name_set = 1; 809 ic_host_name_set = 1;
810 break; 810 break;
811 case 15: /* Domain name (DNS) */ 811 case 15: /* Domain name (DNS) */
@@ -816,7 +816,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
816 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path)); 816 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path));
817 break; 817 break;
818 case 40: /* NIS Domain name (_not_ DNS) */ 818 case 40: /* NIS Domain name (_not_ DNS) */
819 ic_bootp_string(system_utsname.domainname, ext+1, *ext, __NEW_UTS_LEN); 819 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN);
820 break; 820 break;
821 } 821 }
822} 822}
@@ -1368,7 +1368,7 @@ static int __init ip_auto_config(void)
1368 printk(", mask=%u.%u.%u.%u", NIPQUAD(ic_netmask)); 1368 printk(", mask=%u.%u.%u.%u", NIPQUAD(ic_netmask));
1369 printk(", gw=%u.%u.%u.%u", NIPQUAD(ic_gateway)); 1369 printk(", gw=%u.%u.%u.%u", NIPQUAD(ic_gateway));
1370 printk(",\n host=%s, domain=%s, nis-domain=%s", 1370 printk(",\n host=%s, domain=%s, nis-domain=%s",
1371 system_utsname.nodename, ic_domain, system_utsname.domainname); 1371 utsname()->nodename, ic_domain, utsname()->domainname);
1372 printk(",\n bootserver=%u.%u.%u.%u", NIPQUAD(ic_servaddr)); 1372 printk(",\n bootserver=%u.%u.%u.%u", NIPQUAD(ic_servaddr));
1373 printk(", rootserver=%u.%u.%u.%u", NIPQUAD(root_server_addr)); 1373 printk(", rootserver=%u.%u.%u.%u", NIPQUAD(root_server_addr));
1374 printk(", rootpath=%s", root_server_path); 1374 printk(", rootpath=%s", root_server_path);
@@ -1478,11 +1478,11 @@ static int __init ip_auto_config_setup(char *addrs)
1478 case 4: 1478 case 4:
1479 if ((dp = strchr(ip, '.'))) { 1479 if ((dp = strchr(ip, '.'))) {
1480 *dp++ = '\0'; 1480 *dp++ = '\0';
1481 strlcpy(system_utsname.domainname, dp, 1481 strlcpy(utsname()->domainname, dp,
1482 sizeof(system_utsname.domainname)); 1482 sizeof(utsname()->domainname));
1483 } 1483 }
1484 strlcpy(system_utsname.nodename, ip, 1484 strlcpy(utsname()->nodename, ip,
1485 sizeof(system_utsname.nodename)); 1485 sizeof(utsname()->nodename));
1486 ic_host_name_set = 1; 1486 ic_host_name_set = 1;
1487 break; 1487 break;
1488 case 5: 1488 case 5:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ba49588da242..97cfa97c8abb 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -462,7 +462,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
462 return 0; 462 return 0;
463} 463}
464 464
465static struct mfc_cache *ipmr_cache_find(__u32 origin, __u32 mcastgrp) 465static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
466{ 466{
467 int line=MFC_HASH(mcastgrp,origin); 467 int line=MFC_HASH(mcastgrp,origin);
468 struct mfc_cache *c; 468 struct mfc_cache *c;
@@ -1097,7 +1097,7 @@ static struct notifier_block ip_mr_notifier={
1097 * important for multicast video. 1097 * important for multicast video.
1098 */ 1098 */
1099 1099
1100static void ip_encap(struct sk_buff *skb, u32 saddr, u32 daddr) 1100static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1101{ 1101{
1102 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr)); 1102 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
1103 1103
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index c9820bfc493a..891b9355cf96 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -81,7 +81,7 @@ config IP_VS_PROTO_ESP
81 bool "ESP load balancing support" 81 bool "ESP load balancing support"
82 depends on IP_VS 82 depends on IP_VS
83 ---help--- 83 ---help---
84 This option enables support for load balancing ESP (Encapsultion 84 This option enables support for load balancing ESP (Encapsulation
85 Security Payload) transport protocol. Say Y if unsure. 85 Security Payload) transport protocol. Say Y if unsure.
86 86
87config IP_VS_PROTO_AH 87config IP_VS_PROTO_AH
@@ -204,7 +204,7 @@ config IP_VS_SED
204 connections to the server with the shortest expected delay. The 204 connections to the server with the shortest expected delay. The
205 expected delay that the job will experience is (Ci + 1) / Ui if 205 expected delay that the job will experience is (Ci + 1) / Ui if
206 sent to the ith server, in which Ci is the number of connections 206 sent to the ith server, in which Ci is the number of connections
207 on the the ith server and Ui is the fixed service rate (weight) 207 on the ith server and Ui is the fixed service rate (weight)
208 of the ith server. 208 of the ith server.
209 209
210 If you want to compile it in kernel, say Y. To compile it as a 210 If you want to compile it in kernel, say Y. To compile it as a
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 87b83813cf2c..8832eb517d52 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -115,9 +115,9 @@ static inline void ct_write_unlock_bh(unsigned key)
115/* 115/*
116 * Returns hash value for IPVS connection entry 116 * Returns hash value for IPVS connection entry
117 */ 117 */
118static unsigned int ip_vs_conn_hashkey(unsigned proto, __u32 addr, __u16 port) 118static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port)
119{ 119{
120 return jhash_3words(addr, port, proto, ip_vs_conn_rnd) 120 return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd)
121 & IP_VS_CONN_TAB_MASK; 121 & IP_VS_CONN_TAB_MASK;
122} 122}
123 123
@@ -188,7 +188,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
188 * d_addr, d_port: pkt dest address (load balancer) 188 * d_addr, d_port: pkt dest address (load balancer)
189 */ 189 */
190static inline struct ip_vs_conn *__ip_vs_conn_in_get 190static inline struct ip_vs_conn *__ip_vs_conn_in_get
191(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port) 191(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
192{ 192{
193 unsigned hash; 193 unsigned hash;
194 struct ip_vs_conn *cp; 194 struct ip_vs_conn *cp;
@@ -215,7 +215,7 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get
215} 215}
216 216
217struct ip_vs_conn *ip_vs_conn_in_get 217struct ip_vs_conn *ip_vs_conn_in_get
218(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port) 218(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
219{ 219{
220 struct ip_vs_conn *cp; 220 struct ip_vs_conn *cp;
221 221
@@ -234,7 +234,7 @@ struct ip_vs_conn *ip_vs_conn_in_get
234 234
235/* Get reference to connection template */ 235/* Get reference to connection template */
236struct ip_vs_conn *ip_vs_ct_in_get 236struct ip_vs_conn *ip_vs_ct_in_get
237(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port) 237(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
238{ 238{
239 unsigned hash; 239 unsigned hash;
240 struct ip_vs_conn *cp; 240 struct ip_vs_conn *cp;
@@ -274,7 +274,7 @@ struct ip_vs_conn *ip_vs_ct_in_get
274 * d_addr, d_port: pkt dest address (foreign host) 274 * d_addr, d_port: pkt dest address (foreign host)
275 */ 275 */
276struct ip_vs_conn *ip_vs_conn_out_get 276struct ip_vs_conn *ip_vs_conn_out_get
277(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port) 277(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port)
278{ 278{
279 unsigned hash; 279 unsigned hash;
280 struct ip_vs_conn *cp, *ret=NULL; 280 struct ip_vs_conn *cp, *ret=NULL;
@@ -324,7 +324,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
324/* 324/*
325 * Fill a no_client_port connection with a client port number 325 * Fill a no_client_port connection with a client port number
326 */ 326 */
327void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __u16 cport) 327void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
328{ 328{
329 if (ip_vs_conn_unhash(cp)) { 329 if (ip_vs_conn_unhash(cp)) {
330 spin_lock(&cp->lock); 330 spin_lock(&cp->lock);
@@ -508,10 +508,10 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
508 /* 508 /*
509 * Invalidate the connection template 509 * Invalidate the connection template
510 */ 510 */
511 if (ct->vport != 65535) { 511 if (ct->vport != htons(0xffff)) {
512 if (ip_vs_conn_unhash(ct)) { 512 if (ip_vs_conn_unhash(ct)) {
513 ct->dport = 65535; 513 ct->dport = htons(0xffff);
514 ct->vport = 65535; 514 ct->vport = htons(0xffff);
515 ct->cport = 0; 515 ct->cport = 0;
516 ip_vs_conn_hash(ct); 516 ip_vs_conn_hash(ct);
517 } 517 }
@@ -596,8 +596,8 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
596 * Create a new connection entry and hash it into the ip_vs_conn_tab 596 * Create a new connection entry and hash it into the ip_vs_conn_tab
597 */ 597 */
598struct ip_vs_conn * 598struct ip_vs_conn *
599ip_vs_conn_new(int proto, __u32 caddr, __u16 cport, __u32 vaddr, __u16 vport, 599ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport,
600 __u32 daddr, __u16 dport, unsigned flags, 600 __be32 daddr, __be16 dport, unsigned flags,
601 struct ip_vs_dest *dest) 601 struct ip_vs_dest *dest)
602{ 602{
603 struct ip_vs_conn *cp; 603 struct ip_vs_conn *cp;
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 3f47ad8e1cad..1445bb47fea4 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -209,14 +209,14 @@ int ip_vs_make_skb_writable(struct sk_buff **pskb, int writable_len)
209static struct ip_vs_conn * 209static struct ip_vs_conn *
210ip_vs_sched_persist(struct ip_vs_service *svc, 210ip_vs_sched_persist(struct ip_vs_service *svc,
211 const struct sk_buff *skb, 211 const struct sk_buff *skb,
212 __u16 ports[2]) 212 __be16 ports[2])
213{ 213{
214 struct ip_vs_conn *cp = NULL; 214 struct ip_vs_conn *cp = NULL;
215 struct iphdr *iph = skb->nh.iph; 215 struct iphdr *iph = skb->nh.iph;
216 struct ip_vs_dest *dest; 216 struct ip_vs_dest *dest;
217 struct ip_vs_conn *ct; 217 struct ip_vs_conn *ct;
218 __u16 dport; /* destination port to forward */ 218 __be16 dport; /* destination port to forward */
219 __u32 snet; /* source network of the client, after masking */ 219 __be32 snet; /* source network of the client, after masking */
220 220
221 /* Mask saddr with the netmask to adjust template granularity */ 221 /* Mask saddr with the netmask to adjust template granularity */
222 snet = iph->saddr & svc->netmask; 222 snet = iph->saddr & svc->netmask;
@@ -383,7 +383,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
383 struct ip_vs_conn *cp = NULL; 383 struct ip_vs_conn *cp = NULL;
384 struct iphdr *iph = skb->nh.iph; 384 struct iphdr *iph = skb->nh.iph;
385 struct ip_vs_dest *dest; 385 struct ip_vs_dest *dest;
386 __u16 _ports[2], *pptr; 386 __be16 _ports[2], *pptr;
387 387
388 pptr = skb_header_pointer(skb, iph->ihl*4, 388 pptr = skb_header_pointer(skb, iph->ihl*4,
389 sizeof(_ports), _ports); 389 sizeof(_ports), _ports);
@@ -446,7 +446,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
446int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 446int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
447 struct ip_vs_protocol *pp) 447 struct ip_vs_protocol *pp)
448{ 448{
449 __u16 _ports[2], *pptr; 449 __be16 _ports[2], *pptr;
450 struct iphdr *iph = skb->nh.iph; 450 struct iphdr *iph = skb->nh.iph;
451 451
452 pptr = skb_header_pointer(skb, iph->ihl*4, 452 pptr = skb_header_pointer(skb, iph->ihl*4,
@@ -576,7 +576,7 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
576 576
577 /* the TCP/UDP port */ 577 /* the TCP/UDP port */
578 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) { 578 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) {
579 __u16 *ports = (void *)ciph + ciph->ihl*4; 579 __be16 *ports = (void *)ciph + ciph->ihl*4;
580 580
581 if (inout) 581 if (inout)
582 ports[1] = cp->vport; 582 ports[1] = cp->vport;
@@ -775,7 +775,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
775 if (sysctl_ip_vs_nat_icmp_send && 775 if (sysctl_ip_vs_nat_icmp_send &&
776 (pp->protocol == IPPROTO_TCP || 776 (pp->protocol == IPPROTO_TCP ||
777 pp->protocol == IPPROTO_UDP)) { 777 pp->protocol == IPPROTO_UDP)) {
778 __u16 _ports[2], *pptr; 778 __be16 _ports[2], *pptr;
779 779
780 pptr = skb_header_pointer(skb, ihl, 780 pptr = skb_header_pointer(skb, ihl,
781 sizeof(_ports), _ports); 781 sizeof(_ports), _ports);
@@ -813,6 +813,16 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
813 skb->nh.iph->saddr = cp->vaddr; 813 skb->nh.iph->saddr = cp->vaddr;
814 ip_send_check(skb->nh.iph); 814 ip_send_check(skb->nh.iph);
815 815
816 /* For policy routing, packets originating from this
817 * machine itself may be routed differently to packets
818 * passing through. We want this packet to be routed as
819 * if it came from this machine itself. So re-compute
820 * the routing information.
821 */
822 if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
823 goto drop;
824 skb = *pskb;
825
816 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); 826 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
817 827
818 ip_vs_out_stats(cp, skb); 828 ip_vs_out_stats(cp, skb);
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 6a28fafe910c..f261616e4602 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -283,7 +283,7 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
283 * Returns hash value for virtual service 283 * Returns hash value for virtual service
284 */ 284 */
285static __inline__ unsigned 285static __inline__ unsigned
286ip_vs_svc_hashkey(unsigned proto, __u32 addr, __u16 port) 286ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port)
287{ 287{
288 register unsigned porth = ntohs(port); 288 register unsigned porth = ntohs(port);
289 289
@@ -365,7 +365,7 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
365 * Get service by {proto,addr,port} in the service table. 365 * Get service by {proto,addr,port} in the service table.
366 */ 366 */
367static __inline__ struct ip_vs_service * 367static __inline__ struct ip_vs_service *
368__ip_vs_service_get(__u16 protocol, __u32 vaddr, __u16 vport) 368__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport)
369{ 369{
370 unsigned hash; 370 unsigned hash;
371 struct ip_vs_service *svc; 371 struct ip_vs_service *svc;
@@ -410,7 +410,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
410} 410}
411 411
412struct ip_vs_service * 412struct ip_vs_service *
413ip_vs_service_get(__u32 fwmark, __u16 protocol, __u32 vaddr, __u16 vport) 413ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
414{ 414{
415 struct ip_vs_service *svc; 415 struct ip_vs_service *svc;
416 416
@@ -480,7 +480,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
480/* 480/*
481 * Returns hash value for real service 481 * Returns hash value for real service
482 */ 482 */
483static __inline__ unsigned ip_vs_rs_hashkey(__u32 addr, __u16 port) 483static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port)
484{ 484{
485 register unsigned porth = ntohs(port); 485 register unsigned porth = ntohs(port);
486 486
@@ -531,7 +531,7 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
531 * Lookup real service by <proto,addr,port> in the real service table. 531 * Lookup real service by <proto,addr,port> in the real service table.
532 */ 532 */
533struct ip_vs_dest * 533struct ip_vs_dest *
534ip_vs_lookup_real_service(__u16 protocol, __u32 daddr, __u16 dport) 534ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
535{ 535{
536 unsigned hash; 536 unsigned hash;
537 struct ip_vs_dest *dest; 537 struct ip_vs_dest *dest;
@@ -562,7 +562,7 @@ ip_vs_lookup_real_service(__u16 protocol, __u32 daddr, __u16 dport)
562 * Lookup destination by {addr,port} in the given service 562 * Lookup destination by {addr,port} in the given service
563 */ 563 */
564static struct ip_vs_dest * 564static struct ip_vs_dest *
565ip_vs_lookup_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) 565ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
566{ 566{
567 struct ip_vs_dest *dest; 567 struct ip_vs_dest *dest;
568 568
@@ -591,7 +591,7 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport)
591 * scheduling. 591 * scheduling.
592 */ 592 */
593static struct ip_vs_dest * 593static struct ip_vs_dest *
594ip_vs_trash_get_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) 594ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
595{ 595{
596 struct ip_vs_dest *dest, *nxt; 596 struct ip_vs_dest *dest, *nxt;
597 597
@@ -773,8 +773,8 @@ static int
773ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 773ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
774{ 774{
775 struct ip_vs_dest *dest; 775 struct ip_vs_dest *dest;
776 __u32 daddr = udest->addr; 776 __be32 daddr = udest->addr;
777 __u16 dport = udest->port; 777 __be16 dport = udest->port;
778 int ret; 778 int ret;
779 779
780 EnterFunction(2); 780 EnterFunction(2);
@@ -879,8 +879,8 @@ static int
879ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 879ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
880{ 880{
881 struct ip_vs_dest *dest; 881 struct ip_vs_dest *dest;
882 __u32 daddr = udest->addr; 882 __be32 daddr = udest->addr;
883 __u16 dport = udest->port; 883 __be16 dport = udest->port;
884 884
885 EnterFunction(2); 885 EnterFunction(2);
886 886
@@ -991,8 +991,8 @@ static int
991ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) 991ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
992{ 992{
993 struct ip_vs_dest *dest; 993 struct ip_vs_dest *dest;
994 __u32 daddr = udest->addr; 994 __be32 daddr = udest->addr;
995 __u16 dport = udest->port; 995 __be16 dport = udest->port;
996 996
997 EnterFunction(2); 997 EnterFunction(2);
998 998
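Editor's note: the ip_vs_ctl.c hunks above set the pattern for most of this patch. Addresses and ports that travel in network byte order are retyped from __u32/__u16 to the sparse-annotated __be32/__be16, so a sparse run (make C=2) can flag any place where a network-order value is mixed with a host-order one without an explicit ntohl/ntohs or htonl/htons. Below is a minimal user-space sketch of the same annotation idea; the typedefs, hash constants and table mask are stand-ins, not the kernel's definitions.

    /* Minimal sketch of the __be32/__be16 annotation pattern used in this
     * patch; user-space stand-in, not the kernel's linux/types.h. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #ifdef __CHECKER__                      /* defined when sparse runs */
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef uint32_t __bitwise be32;        /* kernel: __be32 */
    typedef uint16_t __bitwise be16;        /* kernel: __be16 */

    /* Same shape as ip_vs_svc_hashkey(): callers pass network-order values
     * and every conversion back to host order is explicit. */
    static unsigned svc_hashkey(unsigned proto, be32 addr, be16 port)
    {
    	unsigned porth = ntohs((__force uint16_t)port);

    	return (proto ^ ntohl((__force uint32_t)addr) ^ (porth >> 4) ^ porth)
    		& 0xff;                 /* 0xff stands in for IP_VS_SVC_TAB_MASK */
    }

    int main(void)
    {
    	be32 addr = (__force be32)htonl(0xc0a80001);   /* 192.168.0.1 */
    	be16 port = (__force be16)htons(80);

    	printf("bucket=%u\n", svc_hashkey(6 /* TCP */, addr, port));
    	return 0;
    }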
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 9fee19c4c617..502111fba872 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -66,7 +66,7 @@ struct ip_vs_dh_bucket {
66/* 66/*
67 * Returns hash value for IPVS DH entry 67 * Returns hash value for IPVS DH entry
68 */ 68 */
69static inline unsigned ip_vs_dh_hashkey(__u32 addr) 69static inline unsigned ip_vs_dh_hashkey(__be32 addr)
70{ 70{
71 return (ntohl(addr)*2654435761UL) & IP_VS_DH_TAB_MASK; 71 return (ntohl(addr)*2654435761UL) & IP_VS_DH_TAB_MASK;
72} 72}
@@ -76,7 +76,7 @@ static inline unsigned ip_vs_dh_hashkey(__u32 addr)
76 * Get ip_vs_dest associated with supplied parameters. 76 * Get ip_vs_dest associated with supplied parameters.
77 */ 77 */
78static inline struct ip_vs_dest * 78static inline struct ip_vs_dest *
79ip_vs_dh_get(struct ip_vs_dh_bucket *tbl, __u32 addr) 79ip_vs_dh_get(struct ip_vs_dh_bucket *tbl, __be32 addr)
80{ 80{
81 return (tbl[ip_vs_dh_hashkey(addr)]).dest; 81 return (tbl[ip_vs_dh_hashkey(addr)]).dest;
82} 82}
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 37fafb1fbcff..6d398f10aa91 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -32,6 +32,7 @@
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <net/protocol.h> 33#include <net/protocol.h>
34#include <net/tcp.h> 34#include <net/tcp.h>
35#include <asm/unaligned.h>
35 36
36#include <net/ip_vs.h> 37#include <net/ip_vs.h>
37 38
@@ -44,8 +45,8 @@
44 * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper 45 * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
45 * First port is set to the default port. 46 * First port is set to the default port.
46 */ 47 */
47static int ports[IP_VS_APP_MAX_PORTS] = {21, 0}; 48static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0};
48module_param_array(ports, int, NULL, 0); 49module_param_array(ports, ushort, NULL, 0);
49MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands"); 50MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands");
50 51
51 52
@@ -74,7 +75,7 @@ ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp)
74 */ 75 */
75static int ip_vs_ftp_get_addrport(char *data, char *data_limit, 76static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
76 const char *pattern, size_t plen, char term, 77 const char *pattern, size_t plen, char term,
77 __u32 *addr, __u16 *port, 78 __be32 *addr, __be16 *port,
78 char **start, char **end) 79 char **start, char **end)
79{ 80{
80 unsigned char p[6]; 81 unsigned char p[6];
@@ -114,8 +115,8 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
114 if (i != 5) 115 if (i != 5)
115 return -1; 116 return -1;
116 117
117 *addr = (p[3]<<24) | (p[2]<<16) | (p[1]<<8) | p[0]; 118 *addr = get_unaligned((__be32 *)p);
118 *port = (p[5]<<8) | p[4]; 119 *port = get_unaligned((__be16 *)(p + 4));
119 return 1; 120 return 1;
120} 121}
121 122
@@ -140,8 +141,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
140 struct tcphdr *th; 141 struct tcphdr *th;
141 char *data, *data_limit; 142 char *data, *data_limit;
142 char *start, *end; 143 char *start, *end;
143 __u32 from; 144 __be32 from;
144 __u16 port; 145 __be16 port;
145 struct ip_vs_conn *n_cp; 146 struct ip_vs_conn *n_cp;
146 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 147 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
147 unsigned buf_len; 148 unsigned buf_len;
@@ -199,7 +200,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
199 from = n_cp->vaddr; 200 from = n_cp->vaddr;
200 port = n_cp->vport; 201 port = n_cp->vport;
201 sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from), 202 sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from),
202 port&255, (port>>8)&255); 203 (ntohs(port)>>8)&255, ntohs(port)&255);
203 buf_len = strlen(buf); 204 buf_len = strlen(buf);
204 205
205 /* 206 /*
@@ -243,8 +244,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
243 struct tcphdr *th; 244 struct tcphdr *th;
244 char *data, *data_start, *data_limit; 245 char *data, *data_start, *data_limit;
245 char *start, *end; 246 char *start, *end;
246 __u32 to; 247 __be32 to;
247 __u16 port; 248 __be16 port;
248 struct ip_vs_conn *n_cp; 249 struct ip_vs_conn *n_cp;
249 250
250 /* no diff required for incoming packets */ 251 /* no diff required for incoming packets */
@@ -273,7 +274,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
273 while (data <= data_limit - 6) { 274 while (data <= data_limit - 6) {
274 if (strnicmp(data, "PASV\r\n", 6) == 0) { 275 if (strnicmp(data, "PASV\r\n", 6) == 0) {
275 /* Passive mode on */ 276 /* Passive mode on */
276 IP_VS_DBG(7, "got PASV at %zd of %zd\n", 277 IP_VS_DBG(7, "got PASV at %td of %td\n",
277 data - data_start, 278 data - data_start,
278 data_limit - data_start); 279 data_limit - data_start);
279 cp->app_data = &ip_vs_ftp_pasv; 280 cp->app_data = &ip_vs_ftp_pasv;
@@ -365,12 +366,6 @@ static int __init ip_vs_ftp_init(void)
365 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { 366 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
366 if (!ports[i]) 367 if (!ports[i])
367 continue; 368 continue;
368 if (ports[i] < 0 || ports[i] > 0xffff) {
369 IP_VS_WARNING("ip_vs_ftp: Ignoring invalid "
370 "configuration port[%d] = %d\n",
371 i, ports[i]);
372 continue;
373 }
374 ret = register_ip_vs_app_inc(app, app->protocol, ports[i]); 369 ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
375 if (ret) 370 if (ret)
376 break; 371 break;
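Editor's note: two things happen in the ip_vs_ftp.c hunks. The ports[] module parameter becomes unsigned short, which is why the old 0..0xffff range check can simply be deleted, and the hand-rolled byte shifts in ip_vs_ftp_get_addrport() are replaced with unaligned loads: the a1,a2,a3,a4,p1,p2 fields of a PORT/227 string are already in wire order, so reading the raw bytes yields a __be32 address and a __be16 port directly. A small user-space sketch, with memcpy() standing in for the kernel's get_unaligned():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* The six decimal fields are already network order on the wire:
     * a1.a2.a3.a4 then the port high byte p1 before p2. */
    static void parse_addrport(const unsigned char p[6],
    			   uint32_t *addr_be, uint16_t *port_be)
    {
    	memcpy(addr_be, p, 4);      /* a1 a2 a3 a4 -> __be32 */
    	memcpy(port_be, p + 4, 2);  /* p1 p2       -> __be16 */
    }

    int main(void)
    {
    	/* 192,168,0,1,0,21 as sent in an FTP PORT command */
    	const unsigned char p[6] = { 192, 168, 0, 1, 0, 21 };
    	uint32_t addr;
    	uint16_t port;

    	parse_addrport(p, &addr, &port);
    	printf("addr=%08x (network order), port=%u\n",
    	       (unsigned)addr, (unsigned)ntohs(port));
    	return 0;
    }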
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 6e5cb92a5c83..524751e031de 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -87,7 +87,7 @@ static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
87 */ 87 */
88struct ip_vs_lblc_entry { 88struct ip_vs_lblc_entry {
89 struct list_head list; 89 struct list_head list;
90 __u32 addr; /* destination IP address */ 90 __be32 addr; /* destination IP address */
91 struct ip_vs_dest *dest; /* real server (cache) */ 91 struct ip_vs_dest *dest; /* real server (cache) */
92 unsigned long lastuse; /* last used time */ 92 unsigned long lastuse; /* last used time */
93}; 93};
@@ -160,7 +160,7 @@ static struct ctl_table_header * sysctl_header;
160 * IP address to a server. 160 * IP address to a server.
161 */ 161 */
162static inline struct ip_vs_lblc_entry * 162static inline struct ip_vs_lblc_entry *
163ip_vs_lblc_new(__u32 daddr, struct ip_vs_dest *dest) 163ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
164{ 164{
165 struct ip_vs_lblc_entry *en; 165 struct ip_vs_lblc_entry *en;
166 166
@@ -195,7 +195,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
195/* 195/*
196 * Returns hash value for IPVS LBLC entry 196 * Returns hash value for IPVS LBLC entry
197 */ 197 */
198static inline unsigned ip_vs_lblc_hashkey(__u32 addr) 198static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
199{ 199{
200 return (ntohl(addr)*2654435761UL) & IP_VS_LBLC_TAB_MASK; 200 return (ntohl(addr)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
201} 201}
@@ -234,7 +234,7 @@ ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
234 * Get ip_vs_lblc_entry associated with supplied parameters. 234 * Get ip_vs_lblc_entry associated with supplied parameters.
235 */ 235 */
236static inline struct ip_vs_lblc_entry * 236static inline struct ip_vs_lblc_entry *
237ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __u32 addr) 237ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
238{ 238{
239 unsigned hash; 239 unsigned hash;
240 struct ip_vs_lblc_entry *en; 240 struct ip_vs_lblc_entry *en;
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 32ba37ba72d8..08990192b6ec 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -276,7 +276,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
276 */ 276 */
277struct ip_vs_lblcr_entry { 277struct ip_vs_lblcr_entry {
278 struct list_head list; 278 struct list_head list;
279 __u32 addr; /* destination IP address */ 279 __be32 addr; /* destination IP address */
280 struct ip_vs_dest_set set; /* destination server set */ 280 struct ip_vs_dest_set set; /* destination server set */
281 unsigned long lastuse; /* last used time */ 281 unsigned long lastuse; /* last used time */
282}; 282};
@@ -348,7 +348,7 @@ static struct ctl_table_header * sysctl_header;
348 * new/free a ip_vs_lblcr_entry, which is a mapping of a destination 348 * new/free a ip_vs_lblcr_entry, which is a mapping of a destination
349 * IP address to a server. 349 * IP address to a server.
350 */ 350 */
351static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__u32 daddr) 351static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
352{ 352{
353 struct ip_vs_lblcr_entry *en; 353 struct ip_vs_lblcr_entry *en;
354 354
@@ -381,7 +381,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
381/* 381/*
382 * Returns hash value for IPVS LBLCR entry 382 * Returns hash value for IPVS LBLCR entry
383 */ 383 */
384static inline unsigned ip_vs_lblcr_hashkey(__u32 addr) 384static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
385{ 385{
386 return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK; 386 return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
387} 387}
@@ -420,7 +420,7 @@ ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
420 * Get ip_vs_lblcr_entry associated with supplied parameters. 420 * Get ip_vs_lblcr_entry associated with supplied parameters.
421 */ 421 */
422static inline struct ip_vs_lblcr_entry * 422static inline struct ip_vs_lblcr_entry *
423ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __u32 addr) 423ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
424{ 424{
425 unsigned hash; 425 unsigned hash;
426 struct ip_vs_lblcr_entry *en; 426 struct ip_vs_lblcr_entry *en;
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 867d4e9c6594..c4528b5c800d 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -176,7 +176,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
176 pp->name, NIPQUAD(ih->saddr), 176 pp->name, NIPQUAD(ih->saddr),
177 NIPQUAD(ih->daddr)); 177 NIPQUAD(ih->daddr));
178 else { 178 else {
179 __u16 _ports[2], *pptr 179 __be16 _ports[2], *pptr
180; 180;
181 pptr = skb_header_pointer(skb, offset + ih->ihl*4, 181 pptr = skb_header_pointer(skb, offset + ih->ihl*4,
182 sizeof(_ports), _ports); 182 sizeof(_ports), _ports);
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index 820e8318d10d..bfe779e74590 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -29,7 +29,7 @@ static struct ip_vs_conn *
29tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 29tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
30 const struct iphdr *iph, unsigned int proto_off, int inverse) 30 const struct iphdr *iph, unsigned int proto_off, int inverse)
31{ 31{
32 __u16 _ports[2], *pptr; 32 __be16 _ports[2], *pptr;
33 33
34 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); 34 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
35 if (pptr == NULL) 35 if (pptr == NULL)
@@ -50,7 +50,7 @@ static struct ip_vs_conn *
50tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 50tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
51 const struct iphdr *iph, unsigned int proto_off, int inverse) 51 const struct iphdr *iph, unsigned int proto_off, int inverse)
52{ 52{
53 __u16 _ports[2], *pptr; 53 __be16 _ports[2], *pptr;
54 54
55 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); 55 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
56 if (pptr == NULL) 56 if (pptr == NULL)
@@ -112,12 +112,12 @@ tcp_conn_schedule(struct sk_buff *skb,
112 112
113 113
114static inline void 114static inline void
115tcp_fast_csum_update(struct tcphdr *tcph, u32 oldip, u32 newip, 115tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip,
116 u16 oldport, u16 newport) 116 __be16 oldport, __be16 newport)
117{ 117{
118 tcph->check = 118 tcph->check =
119 ip_vs_check_diff(~oldip, newip, 119 ip_vs_check_diff(~oldip, newip,
120 ip_vs_check_diff(oldport ^ 0xFFFF, 120 ip_vs_check_diff(oldport ^ htons(0xFFFF),
121 newport, tcph->check)); 121 newport, tcph->check));
122} 122}
123 123
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 90c8166c0ec1..54aa7603591f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -29,7 +29,7 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
29 const struct iphdr *iph, unsigned int proto_off, int inverse) 29 const struct iphdr *iph, unsigned int proto_off, int inverse)
30{ 30{
31 struct ip_vs_conn *cp; 31 struct ip_vs_conn *cp;
32 __u16 _ports[2], *pptr; 32 __be16 _ports[2], *pptr;
33 33
34 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); 34 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
35 if (pptr == NULL) 35 if (pptr == NULL)
@@ -54,7 +54,7 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
54 const struct iphdr *iph, unsigned int proto_off, int inverse) 54 const struct iphdr *iph, unsigned int proto_off, int inverse)
55{ 55{
56 struct ip_vs_conn *cp; 56 struct ip_vs_conn *cp;
57 __u16 _ports[2], *pptr; 57 __be16 _ports[2], *pptr;
58 58
59 pptr = skb_header_pointer(skb, skb->nh.iph->ihl*4, 59 pptr = skb_header_pointer(skb, skb->nh.iph->ihl*4,
60 sizeof(_ports), _ports); 60 sizeof(_ports), _ports);
@@ -117,15 +117,15 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
117 117
118 118
119static inline void 119static inline void
120udp_fast_csum_update(struct udphdr *uhdr, u32 oldip, u32 newip, 120udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip,
121 u16 oldport, u16 newport) 121 __be16 oldport, __be16 newport)
122{ 122{
123 uhdr->check = 123 uhdr->check =
124 ip_vs_check_diff(~oldip, newip, 124 ip_vs_check_diff(~oldip, newip,
125 ip_vs_check_diff(oldport ^ 0xFFFF, 125 ip_vs_check_diff(oldport ^ htons(0xFFFF),
126 newport, uhdr->check)); 126 newport, uhdr->check));
127 if (!uhdr->check) 127 if (!uhdr->check)
128 uhdr->check = 0xFFFF; 128 uhdr->check = htons(0xFFFF);
129} 129}
130 130
131static int 131static int
@@ -173,7 +173,7 @@ udp_snat_handler(struct sk_buff **pskb,
173 cp->protocol, 173 cp->protocol,
174 (*pskb)->csum); 174 (*pskb)->csum);
175 if (udph->check == 0) 175 if (udph->check == 0)
176 udph->check = 0xFFFF; 176 udph->check = htons(0xFFFF);
177 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", 177 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
178 pp->name, udph->check, 178 pp->name, udph->check,
179 (char*)&(udph->check) - (char*)udph); 179 (char*)&(udph->check) - (char*)udph);
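Editor's note: with the port and checksum fields typed __be16, the width of the constants matters. The all-ones value used in the checksum-diff trick and in the "a zero UDP checksum becomes 0xFFFF" rule has to be built with htons(); htonl(0xFFFF) is a 32-bit constant that, on a little-endian host, becomes 0xFFFF0000 and truncates to zero when stored in a 16-bit header field. A user-space check of the difference, assuming nothing beyond the standard byte-order helpers:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
    	uint16_t check;

    	check = (uint16_t)htons(0xFFFF);  /* 0xFFFF on any byte order */
    	printf("htons(0xFFFF) in a 16-bit field: 0x%04x\n", check);

    	check = (uint16_t)htonl(0xFFFF);  /* 0x0000 on little-endian hosts */
    	printf("htonl(0xFFFF) in a 16-bit field: 0x%04x\n", check);

    	return 0;
    }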
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 7775e6cc68be..338668f88fe2 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -63,7 +63,7 @@ struct ip_vs_sh_bucket {
63/* 63/*
64 * Returns hash value for IPVS SH entry 64 * Returns hash value for IPVS SH entry
65 */ 65 */
66static inline unsigned ip_vs_sh_hashkey(__u32 addr) 66static inline unsigned ip_vs_sh_hashkey(__be32 addr)
67{ 67{
68 return (ntohl(addr)*2654435761UL) & IP_VS_SH_TAB_MASK; 68 return (ntohl(addr)*2654435761UL) & IP_VS_SH_TAB_MASK;
69} 69}
@@ -73,7 +73,7 @@ static inline unsigned ip_vs_sh_hashkey(__u32 addr)
73 * Get ip_vs_dest associated with supplied parameters. 73 * Get ip_vs_dest associated with supplied parameters.
74 */ 74 */
75static inline struct ip_vs_dest * 75static inline struct ip_vs_dest *
76ip_vs_sh_get(struct ip_vs_sh_bucket *tbl, __u32 addr) 76ip_vs_sh_get(struct ip_vs_sh_bucket *tbl, __be32 addr)
77{ 77{
78 return (tbl[ip_vs_sh_hashkey(addr)]).dest; 78 return (tbl[ip_vs_sh_hashkey(addr)]).dest;
79} 79}
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 1bca714bda3d..91a075edd68e 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -48,16 +48,16 @@ struct ip_vs_sync_conn {
48 48
49 /* Protocol, addresses and port numbers */ 49 /* Protocol, addresses and port numbers */
50 __u8 protocol; /* Which protocol (TCP/UDP) */ 50 __u8 protocol; /* Which protocol (TCP/UDP) */
51 __u16 cport; 51 __be16 cport;
52 __u16 vport; 52 __be16 vport;
53 __u16 dport; 53 __be16 dport;
54 __u32 caddr; /* client address */ 54 __be32 caddr; /* client address */
55 __u32 vaddr; /* virtual address */ 55 __be32 vaddr; /* virtual address */
56 __u32 daddr; /* destination address */ 56 __be32 daddr; /* destination address */
57 57
58 /* Flags and state transition */ 58 /* Flags and state transition */
59 __u16 flags; /* status flags */ 59 __be16 flags; /* status flags */
60 __u16 state; /* state info */ 60 __be16 state; /* state info */
61 61
62 /* The sequence options start here */ 62 /* The sequence options start here */
63}; 63};
@@ -464,7 +464,7 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
464static int bind_mcastif_addr(struct socket *sock, char *ifname) 464static int bind_mcastif_addr(struct socket *sock, char *ifname)
465{ 465{
466 struct net_device *dev; 466 struct net_device *dev;
467 u32 addr; 467 __be32 addr;
468 struct sockaddr_in sin; 468 struct sockaddr_in sin;
469 469
470 if ((dev = __dev_get_by_name(ifname)) == NULL) 470 if ((dev = __dev_get_by_name(ifname)) == NULL)
@@ -836,7 +836,7 @@ static int fork_sync_thread(void *startup)
836 836
837int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) 837int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
838{ 838{
839 DECLARE_COMPLETION(startup); 839 DECLARE_COMPLETION_ONSTACK(startup);
840 pid_t pid; 840 pid_t pid;
841 841
842 if ((state == IP_VS_STATE_MASTER && sync_master_pid) || 842 if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
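Editor's note: struct ip_vs_sync_conn describes bytes that are copied straight into a sync message, so every multi-byte field is now annotated __be16/__be32; the values must already be in network order when the record leaves the box. A user-space sketch of that layout discipline follows; the field names mirror the struct above, the values are arbitrary.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    struct sync_conn_wire {
    	uint8_t  reserved;
    	uint8_t  protocol;
    	uint16_t cport, vport, dport;   /* __be16 on the wire */
    	uint32_t caddr, vaddr, daddr;   /* __be32 on the wire */
    	uint16_t flags, state;          /* also sent in network order */
    } __attribute__((packed));

    int main(void)
    {
    	struct sync_conn_wire m;

    	memset(&m, 0, sizeof(m));
    	m.protocol = 6;                         /* TCP */
    	m.cport = htons(33333);
    	m.vport = htons(80);
    	m.dport = htons(8080);
    	m.caddr = inet_addr("10.0.0.1");        /* already network order */
    	m.vaddr = inet_addr("192.168.1.1");
    	m.daddr = inet_addr("192.168.1.10");
    	m.flags = htons(0x0100);
    	m.state = htons(1);

    	printf("wire record is %zu bytes\n", sizeof(m));
    	return 0;
    }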
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 52c12e9edbbc..e1f77bd7c9a5 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -232,7 +232,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
232 232
233 /* check if it is a connection of no-client-port */ 233 /* check if it is a connection of no-client-port */
234 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { 234 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
235 __u16 _pt, *p; 235 __be16 _pt, *p;
236 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt); 236 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
237 if (p == NULL) 237 if (p == NULL)
238 goto tx_error; 238 goto tx_error;
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index d25ec4ae09e5..92b04823e034 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -60,8 +60,8 @@ struct multipath_dest {
60 struct list_head list; 60 struct list_head list;
61 61
62 const struct fib_nh *nh_info; 62 const struct fib_nh *nh_info;
63 __u32 netmask; 63 __be32 netmask;
64 __u32 network; 64 __be32 network;
65 unsigned char prefixlen; 65 unsigned char prefixlen;
66 66
67 struct rcu_head rcu; 67 struct rcu_head rcu;
@@ -76,7 +76,7 @@ struct multipath_route {
76 struct list_head list; 76 struct list_head list;
77 77
78 int oif; 78 int oif;
79 __u32 gw; 79 __be32 gw;
80 struct list_head dests; 80 struct list_head dests;
81 81
82 struct rcu_head rcu; 82 struct rcu_head rcu;
@@ -128,8 +128,8 @@ static unsigned char __multipath_lookup_weight(const struct flowi *fl,
128 128
129 /* find state entry for destination */ 129 /* find state entry for destination */
130 list_for_each_entry_rcu(d, &target_route->dests, list) { 130 list_for_each_entry_rcu(d, &target_route->dests, list) {
131 __u32 targetnetwork = fl->fl4_dst & 131 __be32 targetnetwork = fl->fl4_dst &
132 (0xFFFFFFFF >> (32 - d->prefixlen)); 132 inet_make_mask(d->prefixlen);
133 133
134 if ((targetnetwork & d->netmask) == d->network) { 134 if ((targetnetwork & d->netmask) == d->network) {
135 weight = d->nh_info->nh_weight; 135 weight = d->nh_info->nh_weight;
@@ -217,8 +217,8 @@ static void wrandom_select_route(const struct flowi *flp,
217 *rp = decision; 217 *rp = decision;
218} 218}
219 219
220static void wrandom_set_nhinfo(__u32 network, 220static void wrandom_set_nhinfo(__be32 network,
221 __u32 netmask, 221 __be32 netmask,
222 unsigned char prefixlen, 222 unsigned char prefixlen,
223 const struct fib_nh *nh) 223 const struct fib_nh *nh)
224{ 224{
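Editor's note: the multipath_wrandom.c hunk replaces an ad-hoc host-order shift with inet_make_mask(), which returns the /prefixlen netmask in network byte order so it can be ANDed directly with a __be32 destination. A user-space stand-in with the same meaning; the helper below is an assumption about the formula, not the kernel's implementation.

    #include <stdio.h>
    #include <stdint.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    /* /prefixlen netmask in network byte order */
    static uint32_t make_mask_be(int prefixlen)
    {
    	if (prefixlen == 0)
    		return 0;
    	return htonl(~0u << (32 - prefixlen));
    }

    int main(void)
    {
    	uint32_t addr = inet_addr("192.168.42.7");  /* already network order */
    	struct in_addr net = { .s_addr = addr & make_mask_be(24) };

    	printf("network = %s\n", inet_ntoa(net));   /* 192.168.42.0 */
    	return 0;
    }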
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f88347de21a9..e2005c6810a4 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -8,7 +8,7 @@
8#include <net/ip.h> 8#include <net/ip.h>
9 9
10/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 10/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
11int ip_route_me_harder(struct sk_buff **pskb) 11int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
12{ 12{
13 struct iphdr *iph = (*pskb)->nh.iph; 13 struct iphdr *iph = (*pskb)->nh.iph;
14 struct rtable *rt; 14 struct rtable *rt;
@@ -16,10 +16,13 @@ int ip_route_me_harder(struct sk_buff **pskb)
16 struct dst_entry *odst; 16 struct dst_entry *odst;
17 unsigned int hh_len; 17 unsigned int hh_len;
18 18
19 if (addr_type == RTN_UNSPEC)
20 addr_type = inet_addr_type(iph->saddr);
21
19 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause 22 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
20 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook. 23 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
21 */ 24 */
22 if (inet_addr_type(iph->saddr) == RTN_LOCAL) { 25 if (addr_type == RTN_LOCAL) {
23 fl.nl_u.ip4_u.daddr = iph->daddr; 26 fl.nl_u.ip4_u.daddr = iph->daddr;
24 fl.nl_u.ip4_u.saddr = iph->saddr; 27 fl.nl_u.ip4_u.saddr = iph->saddr;
25 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 28 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
@@ -128,8 +131,8 @@ EXPORT_SYMBOL(ip_nat_decode_session);
128 */ 131 */
129 132
130struct ip_rt_info { 133struct ip_rt_info {
131 u_int32_t daddr; 134 __be32 daddr;
132 u_int32_t saddr; 135 __be32 saddr;
133 u_int8_t tos; 136 u_int8_t tos;
134}; 137};
135 138
@@ -156,7 +159,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info)
156 if (!(iph->tos == rt_info->tos 159 if (!(iph->tos == rt_info->tos
157 && iph->daddr == rt_info->daddr 160 && iph->daddr == rt_info->daddr
158 && iph->saddr == rt_info->saddr)) 161 && iph->saddr == rt_info->saddr))
159 return ip_route_me_harder(pskb); 162 return ip_route_me_harder(pskb, RTN_UNSPEC);
160 } 163 }
161 return 0; 164 return 0;
162} 165}
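Editor's note: ip_route_me_harder() now takes an address-type hint. Callers that already know how the source address classifies (the IPVS SNAT path earlier in this patch passes RTN_LOCAL) avoid a second inet_addr_type() lookup, while RTN_UNSPEC keeps the old behaviour of looking it up on demand. A user-space sketch of that calling convention; the enum values and the lookup below are illustrative stand-ins, not the kernel's.

    #include <stdio.h>

    enum addr_type { ADDR_UNSPEC, ADDR_LOCAL, ADDR_UNICAST };

    /* stand-in for inet_addr_type(saddr) */
    static enum addr_type lookup_addr_type(unsigned int saddr)
    {
    	return (saddr >> 24) == 127 ? ADDR_LOCAL : ADDR_UNICAST;
    }

    static int route_me_harder(unsigned int saddr, enum addr_type addr_type)
    {
    	if (addr_type == ADDR_UNSPEC)           /* no hint: look it up */
    		addr_type = lookup_addr_type(saddr);

    	/* locally generated packets get re-routed with saddr pinned */
    	return addr_type == ADDR_LOCAL ? 1 : 0;
    }

    int main(void)
    {
    	printf("%d\n", route_me_harder(0x7f000001, ADDR_UNSPEC)); /* looked up */
    	printf("%d\n", route_me_harder(0x0a000001, ADDR_LOCAL));  /* caller hint */
    	return 0;
    }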
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index a55b8ff70ded..d88c292f118c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -373,7 +373,7 @@ config IP_NF_TARGET_ULOG
373 daemon using netlink multicast sockets; unlike the LOG target 373 daemon using netlink multicast sockets; unlike the LOG target
374 which can only be viewed through syslog. 374 which can only be viewed through syslog.
375 375
376 The apropriate userspace logging daemon (ulogd) may be obtained from 376 The appropriate userspace logging daemon (ulogd) may be obtained from
377 <http://www.gnumonks.org/projects/ulogd/> 377 <http://www.gnumonks.org/projects/ulogd/>
378 378
379 To compile it as a module, choose M here. If unsure, say N. 379 To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 85f0d73ebfb4..0849f1cced13 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -80,7 +80,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
80{ 80{
81 char *arpptr = (char *)(arphdr + 1); 81 char *arpptr = (char *)(arphdr + 1);
82 char *src_devaddr, *tgt_devaddr; 82 char *src_devaddr, *tgt_devaddr;
83 u32 src_ipaddr, tgt_ipaddr; 83 __be32 src_ipaddr, tgt_ipaddr;
84 int i, ret; 84 int i, ret;
85 85
86#define FWINV(bool,invflg) ((bool) ^ !!(arpinfo->invflags & invflg)) 86#define FWINV(bool,invflg) ((bool) ^ !!(arpinfo->invflags & invflg))
@@ -1196,6 +1196,8 @@ err1:
1196static void __exit arp_tables_fini(void) 1196static void __exit arp_tables_fini(void)
1197{ 1197{
1198 nf_unregister_sockopt(&arpt_sockopts); 1198 nf_unregister_sockopt(&arpt_sockopts);
1199 xt_unregister_target(&arpt_error_target);
1200 xt_unregister_target(&arpt_standard_target);
1199 xt_proto_fini(NF_ARP); 1201 xt_proto_fini(NF_ARP);
1200} 1202}
1201 1203
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index 0a7bd7f04061..6c7383a8e42b 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -155,11 +155,11 @@ static int help(struct sk_buff **pskb,
155 exp->tuple.dst.protonum = IPPROTO_TCP; 155 exp->tuple.dst.protonum = IPPROTO_TCP;
156 exp->tuple.dst.u.tcp.port = htons(port); 156 exp->tuple.dst.u.tcp.port = htons(port);
157 157
158 exp->mask.src.ip = 0xFFFFFFFF; 158 exp->mask.src.ip = htonl(0xFFFFFFFF);
159 exp->mask.src.u.tcp.port = 0; 159 exp->mask.src.u.tcp.port = 0;
160 exp->mask.dst.ip = 0xFFFFFFFF; 160 exp->mask.dst.ip = htonl(0xFFFFFFFF);
161 exp->mask.dst.protonum = 0xFF; 161 exp->mask.dst.protonum = 0xFF;
162 exp->mask.dst.u.tcp.port = 0xFFFF; 162 exp->mask.dst.u.tcp.port = htons(0xFFFF);
163 163
164 if (ip_nat_amanda_hook) 164 if (ip_nat_amanda_hook)
165 ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff, 165 ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff,
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index c432b3163609..143c4668538b 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -149,8 +149,8 @@ static unsigned int ip_conntrack_hash_rnd;
149static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, 149static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
150 unsigned int size, unsigned int rnd) 150 unsigned int size, unsigned int rnd)
151{ 151{
152 return (jhash_3words(tuple->src.ip, 152 return (jhash_3words((__force u32)tuple->src.ip,
153 (tuple->dst.ip ^ tuple->dst.protonum), 153 ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
154 (tuple->src.u.all | (tuple->dst.u.all << 16)), 154 (tuple->src.u.all | (tuple->dst.u.all << 16)),
155 rnd) % size); 155 rnd) % size);
156} 156}
@@ -1169,9 +1169,9 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
1169int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb, 1169int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb,
1170 const struct ip_conntrack_tuple *tuple) 1170 const struct ip_conntrack_tuple *tuple)
1171{ 1171{
1172 NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t), 1172 NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(__be16),
1173 &tuple->src.u.tcp.port); 1173 &tuple->src.u.tcp.port);
1174 NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t), 1174 NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(__be16),
1175 &tuple->dst.u.tcp.port); 1175 &tuple->dst.u.tcp.port);
1176 return 0; 1176 return 0;
1177 1177
@@ -1186,9 +1186,9 @@ int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
1186 return -EINVAL; 1186 return -EINVAL;
1187 1187
1188 t->src.u.tcp.port = 1188 t->src.u.tcp.port =
1189 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); 1189 *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
1190 t->dst.u.tcp.port = 1190 t->dst.u.tcp.port =
1191 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); 1191 *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
1192 1192
1193 return 0; 1193 return 0;
1194} 1194}
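Editor's note: __hash_conntrack() keeps feeding the network-order addresses straight into jhash_3words() and only adds (__force u32) casts: a hash bucket has to be stable on one machine, not comparable across byte orders, so converting would be wasted work and the cast just tells sparse the mixing is deliberate. The NFA_PUT/NFA_DATA sizes likewise become sizeof(__be16)/sizeof(__be32) to match what is actually stored. A user-space sketch of the hashing point, with a simplified mixer standing in for jhash_3words():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* stand-in for jhash_3words(); any decent mixer works for this point */
    static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
    {
    	a ^= b * 2654435761u;
    	a ^= c * 40503u;
    	return a * 2654435761u;
    }

    int main(void)
    {
    	uint32_t src = inet_addr("10.0.0.1");   /* network order (__be32) */
    	uint32_t dst = inet_addr("10.0.0.2");
    	uint8_t protonum = 6;                   /* TCP */

    	/* same expression shape as __hash_conntrack(), minus annotations */
    	unsigned bucket = mix(src, dst ^ protonum,
    			      (80u | (8080u << 16))) % 512;

    	printf("bucket=%u of 512\n", bucket);
    	return 0;
    }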
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 1d18c863f064..93dcf960662f 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -425,8 +425,8 @@ static int help(struct sk_buff **pskb,
425 exp->tuple.src.u.tcp.port = 0; /* Don't care. */ 425 exp->tuple.src.u.tcp.port = 0; /* Don't care. */
426 exp->tuple.dst.protonum = IPPROTO_TCP; 426 exp->tuple.dst.protonum = IPPROTO_TCP;
427 exp->mask = ((struct ip_conntrack_tuple) 427 exp->mask = ((struct ip_conntrack_tuple)
428 { { 0xFFFFFFFF, { 0 } }, 428 { { htonl(0xFFFFFFFF), { 0 } },
429 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); 429 { htonl(0xFFFFFFFF), { .tcp = { htons(0xFFFF) } }, 0xFF }});
430 430
431 exp->expectfn = NULL; 431 exp->expectfn = NULL;
432 exp->flags = 0; 432 exp->flags = 0;
@@ -488,7 +488,7 @@ static int __init ip_conntrack_ftp_init(void)
488 for (i = 0; i < ports_c; i++) { 488 for (i = 0; i < ports_c; i++) {
489 ftp[i].tuple.src.u.tcp.port = htons(ports[i]); 489 ftp[i].tuple.src.u.tcp.port = htons(ports[i]);
490 ftp[i].tuple.dst.protonum = IPPROTO_TCP; 490 ftp[i].tuple.dst.protonum = IPPROTO_TCP;
491 ftp[i].mask.src.u.tcp.port = 0xFFFF; 491 ftp[i].mask.src.u.tcp.port = htons(0xFFFF);
492 ftp[i].mask.dst.protonum = 0xFF; 492 ftp[i].mask.dst.protonum = 0xFF;
493 ftp[i].max_expected = 1; 493 ftp[i].max_expected = 1;
494 ftp[i].timeout = 5 * 60; /* 5 minutes */ 494 ftp[i].timeout = 5 * 60; /* 5 minutes */
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index 9a39e2969712..7b7441202bfd 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -49,11 +49,11 @@ MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
49int (*set_h245_addr_hook) (struct sk_buff ** pskb, 49int (*set_h245_addr_hook) (struct sk_buff ** pskb,
50 unsigned char **data, int dataoff, 50 unsigned char **data, int dataoff,
51 H245_TransportAddress * addr, 51 H245_TransportAddress * addr,
52 u_int32_t ip, u_int16_t port); 52 __be32 ip, u_int16_t port);
53int (*set_h225_addr_hook) (struct sk_buff ** pskb, 53int (*set_h225_addr_hook) (struct sk_buff ** pskb,
54 unsigned char **data, int dataoff, 54 unsigned char **data, int dataoff,
55 TransportAddress * addr, 55 TransportAddress * addr,
56 u_int32_t ip, u_int16_t port); 56 __be32 ip, u_int16_t port);
57int (*set_sig_addr_hook) (struct sk_buff ** pskb, 57int (*set_sig_addr_hook) (struct sk_buff ** pskb,
58 struct ip_conntrack * ct, 58 struct ip_conntrack * ct,
59 enum ip_conntrack_info ctinfo, 59 enum ip_conntrack_info ctinfo,
@@ -209,7 +209,7 @@ static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct,
209 209
210/****************************************************************************/ 210/****************************************************************************/
211static int get_h245_addr(unsigned char *data, H245_TransportAddress * addr, 211static int get_h245_addr(unsigned char *data, H245_TransportAddress * addr,
212 u_int32_t * ip, u_int16_t * port) 212 __be32 * ip, u_int16_t * port)
213{ 213{
214 unsigned char *p; 214 unsigned char *p;
215 215
@@ -232,7 +232,7 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct,
232{ 232{
233 int dir = CTINFO2DIR(ctinfo); 233 int dir = CTINFO2DIR(ctinfo);
234 int ret = 0; 234 int ret = 0;
235 u_int32_t ip; 235 __be32 ip;
236 u_int16_t port; 236 u_int16_t port;
237 u_int16_t rtp_port; 237 u_int16_t rtp_port;
238 struct ip_conntrack_expect *rtp_exp; 238 struct ip_conntrack_expect *rtp_exp;
@@ -254,10 +254,10 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct,
254 rtp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; 254 rtp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
255 rtp_exp->tuple.dst.u.udp.port = htons(rtp_port); 255 rtp_exp->tuple.dst.u.udp.port = htons(rtp_port);
256 rtp_exp->tuple.dst.protonum = IPPROTO_UDP; 256 rtp_exp->tuple.dst.protonum = IPPROTO_UDP;
257 rtp_exp->mask.src.ip = 0xFFFFFFFF; 257 rtp_exp->mask.src.ip = htonl(0xFFFFFFFF);
258 rtp_exp->mask.src.u.udp.port = 0; 258 rtp_exp->mask.src.u.udp.port = 0;
259 rtp_exp->mask.dst.ip = 0xFFFFFFFF; 259 rtp_exp->mask.dst.ip = htonl(0xFFFFFFFF);
260 rtp_exp->mask.dst.u.udp.port = 0xFFFF; 260 rtp_exp->mask.dst.u.udp.port = htons(0xFFFF);
261 rtp_exp->mask.dst.protonum = 0xFF; 261 rtp_exp->mask.dst.protonum = 0xFF;
262 rtp_exp->flags = 0; 262 rtp_exp->flags = 0;
263 263
@@ -271,10 +271,10 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct,
271 rtcp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; 271 rtcp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
272 rtcp_exp->tuple.dst.u.udp.port = htons(rtp_port + 1); 272 rtcp_exp->tuple.dst.u.udp.port = htons(rtp_port + 1);
273 rtcp_exp->tuple.dst.protonum = IPPROTO_UDP; 273 rtcp_exp->tuple.dst.protonum = IPPROTO_UDP;
274 rtcp_exp->mask.src.ip = 0xFFFFFFFF; 274 rtcp_exp->mask.src.ip = htonl(0xFFFFFFFF);
275 rtcp_exp->mask.src.u.udp.port = 0; 275 rtcp_exp->mask.src.u.udp.port = 0;
276 rtcp_exp->mask.dst.ip = 0xFFFFFFFF; 276 rtcp_exp->mask.dst.ip = htonl(0xFFFFFFFF);
277 rtcp_exp->mask.dst.u.udp.port = 0xFFFF; 277 rtcp_exp->mask.dst.u.udp.port = htons(0xFFFF);
278 rtcp_exp->mask.dst.protonum = 0xFF; 278 rtcp_exp->mask.dst.protonum = 0xFF;
279 rtcp_exp->flags = 0; 279 rtcp_exp->flags = 0;
280 280
@@ -325,7 +325,7 @@ static int expect_t120(struct sk_buff **pskb,
325{ 325{
326 int dir = CTINFO2DIR(ctinfo); 326 int dir = CTINFO2DIR(ctinfo);
327 int ret = 0; 327 int ret = 0;
328 u_int32_t ip; 328 __be32 ip;
329 u_int16_t port; 329 u_int16_t port;
330 struct ip_conntrack_expect *exp = NULL; 330 struct ip_conntrack_expect *exp = NULL;
331 331
@@ -342,10 +342,10 @@ static int expect_t120(struct sk_buff **pskb,
342 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; 342 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
343 exp->tuple.dst.u.tcp.port = htons(port); 343 exp->tuple.dst.u.tcp.port = htons(port);
344 exp->tuple.dst.protonum = IPPROTO_TCP; 344 exp->tuple.dst.protonum = IPPROTO_TCP;
345 exp->mask.src.ip = 0xFFFFFFFF; 345 exp->mask.src.ip = htonl(0xFFFFFFFF);
346 exp->mask.src.u.tcp.port = 0; 346 exp->mask.src.u.tcp.port = 0;
347 exp->mask.dst.ip = 0xFFFFFFFF; 347 exp->mask.dst.ip = htonl(0xFFFFFFFF);
348 exp->mask.dst.u.tcp.port = 0xFFFF; 348 exp->mask.dst.u.tcp.port = htons(0xFFFF);
349 exp->mask.dst.protonum = 0xFF; 349 exp->mask.dst.protonum = 0xFF;
350 exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple channels */ 350 exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple channels */
351 351
@@ -626,7 +626,7 @@ void ip_conntrack_h245_expect(struct ip_conntrack *new,
626 626
627/****************************************************************************/ 627/****************************************************************************/
628int get_h225_addr(unsigned char *data, TransportAddress * addr, 628int get_h225_addr(unsigned char *data, TransportAddress * addr,
629 u_int32_t * ip, u_int16_t * port) 629 __be32 * ip, u_int16_t * port)
630{ 630{
631 unsigned char *p; 631 unsigned char *p;
632 632
@@ -648,7 +648,7 @@ static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct,
648{ 648{
649 int dir = CTINFO2DIR(ctinfo); 649 int dir = CTINFO2DIR(ctinfo);
650 int ret = 0; 650 int ret = 0;
651 u_int32_t ip; 651 __be32 ip;
652 u_int16_t port; 652 u_int16_t port;
653 struct ip_conntrack_expect *exp = NULL; 653 struct ip_conntrack_expect *exp = NULL;
654 654
@@ -665,10 +665,10 @@ static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct,
665 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; 665 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
666 exp->tuple.dst.u.tcp.port = htons(port); 666 exp->tuple.dst.u.tcp.port = htons(port);
667 exp->tuple.dst.protonum = IPPROTO_TCP; 667 exp->tuple.dst.protonum = IPPROTO_TCP;
668 exp->mask.src.ip = 0xFFFFFFFF; 668 exp->mask.src.ip = htonl(0xFFFFFFFF);
669 exp->mask.src.u.tcp.port = 0; 669 exp->mask.src.u.tcp.port = 0;
670 exp->mask.dst.ip = 0xFFFFFFFF; 670 exp->mask.dst.ip = htonl(0xFFFFFFFF);
671 exp->mask.dst.u.tcp.port = 0xFFFF; 671 exp->mask.dst.u.tcp.port = htons(0xFFFF);
672 exp->mask.dst.protonum = 0xFF; 672 exp->mask.dst.protonum = 0xFF;
673 exp->flags = 0; 673 exp->flags = 0;
674 674
@@ -709,7 +709,7 @@ static int expect_callforwarding(struct sk_buff **pskb,
709{ 709{
710 int dir = CTINFO2DIR(ctinfo); 710 int dir = CTINFO2DIR(ctinfo);
711 int ret = 0; 711 int ret = 0;
712 u_int32_t ip; 712 __be32 ip;
713 u_int16_t port; 713 u_int16_t port;
714 struct ip_conntrack_expect *exp = NULL; 714 struct ip_conntrack_expect *exp = NULL;
715 715
@@ -751,10 +751,10 @@ static int expect_callforwarding(struct sk_buff **pskb,
751 exp->tuple.dst.ip = ip; 751 exp->tuple.dst.ip = ip;
752 exp->tuple.dst.u.tcp.port = htons(port); 752 exp->tuple.dst.u.tcp.port = htons(port);
753 exp->tuple.dst.protonum = IPPROTO_TCP; 753 exp->tuple.dst.protonum = IPPROTO_TCP;
754 exp->mask.src.ip = 0xFFFFFFFF; 754 exp->mask.src.ip = htonl(0xFFFFFFFF);
755 exp->mask.src.u.tcp.port = 0; 755 exp->mask.src.u.tcp.port = 0;
756 exp->mask.dst.ip = 0xFFFFFFFF; 756 exp->mask.dst.ip = htonl(0xFFFFFFFF);
757 exp->mask.dst.u.tcp.port = 0xFFFF; 757 exp->mask.dst.u.tcp.port = htons(0xFFFF);
758 exp->mask.dst.protonum = 0xFF; 758 exp->mask.dst.protonum = 0xFF;
759 exp->flags = 0; 759 exp->flags = 0;
760 760
@@ -791,7 +791,7 @@ static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct,
791 int dir = CTINFO2DIR(ctinfo); 791 int dir = CTINFO2DIR(ctinfo);
792 int ret; 792 int ret;
793 int i; 793 int i;
794 u_int32_t ip; 794 __be32 ip;
795 u_int16_t port; 795 u_int16_t port;
796 796
797 DEBUGP("ip_ct_q931: Setup\n"); 797 DEBUGP("ip_ct_q931: Setup\n");
@@ -1188,7 +1188,7 @@ static unsigned char *get_udp_data(struct sk_buff **pskb, int *datalen)
1188 1188
1189/****************************************************************************/ 1189/****************************************************************************/
1190static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct, 1190static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
1191 u_int32_t ip, u_int16_t port) 1191 __be32 ip, u_int16_t port)
1192{ 1192{
1193 struct ip_conntrack_expect *exp; 1193 struct ip_conntrack_expect *exp;
1194 struct ip_conntrack_tuple tuple; 1194 struct ip_conntrack_tuple tuple;
@@ -1228,7 +1228,7 @@ static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
1228 int dir = CTINFO2DIR(ctinfo); 1228 int dir = CTINFO2DIR(ctinfo);
1229 int ret = 0; 1229 int ret = 0;
1230 int i; 1230 int i;
1231 u_int32_t ip; 1231 __be32 ip;
1232 u_int16_t port; 1232 u_int16_t port;
1233 struct ip_conntrack_expect *exp; 1233 struct ip_conntrack_expect *exp;
1234 1234
@@ -1251,10 +1251,10 @@ static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
1251 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; 1251 exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
1252 exp->tuple.dst.u.tcp.port = htons(port); 1252 exp->tuple.dst.u.tcp.port = htons(port);
1253 exp->tuple.dst.protonum = IPPROTO_TCP; 1253 exp->tuple.dst.protonum = IPPROTO_TCP;
1254 exp->mask.src.ip = gkrouted_only ? 0xFFFFFFFF : 0; 1254 exp->mask.src.ip = gkrouted_only ? htonl(0xFFFFFFFF) : 0;
1255 exp->mask.src.u.tcp.port = 0; 1255 exp->mask.src.u.tcp.port = 0;
1256 exp->mask.dst.ip = 0xFFFFFFFF; 1256 exp->mask.dst.ip = htonl(0xFFFFFFFF);
1257 exp->mask.dst.u.tcp.port = 0xFFFF; 1257 exp->mask.dst.u.tcp.port = htons(0xFFFF);
1258 exp->mask.dst.protonum = 0xFF; 1258 exp->mask.dst.protonum = 0xFF;
1259 exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple calls */ 1259 exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple calls */
1260 1260
@@ -1307,7 +1307,7 @@ static int process_gcf(struct sk_buff **pskb, struct ip_conntrack *ct,
1307{ 1307{
1308 int dir = CTINFO2DIR(ctinfo); 1308 int dir = CTINFO2DIR(ctinfo);
1309 int ret = 0; 1309 int ret = 0;
1310 u_int32_t ip; 1310 __be32 ip;
1311 u_int16_t port; 1311 u_int16_t port;
1312 struct ip_conntrack_expect *exp; 1312 struct ip_conntrack_expect *exp;
1313 1313
@@ -1333,10 +1333,10 @@ static int process_gcf(struct sk_buff **pskb, struct ip_conntrack *ct,
1333 exp->tuple.dst.ip = ip; 1333 exp->tuple.dst.ip = ip;
1334 exp->tuple.dst.u.tcp.port = htons(port); 1334 exp->tuple.dst.u.tcp.port = htons(port);
1335 exp->tuple.dst.protonum = IPPROTO_UDP; 1335 exp->tuple.dst.protonum = IPPROTO_UDP;
1336 exp->mask.src.ip = 0xFFFFFFFF; 1336 exp->mask.src.ip = htonl(0xFFFFFFFF);
1337 exp->mask.src.u.tcp.port = 0; 1337 exp->mask.src.u.tcp.port = 0;
1338 exp->mask.dst.ip = 0xFFFFFFFF; 1338 exp->mask.dst.ip = htonl(0xFFFFFFFF);
1339 exp->mask.dst.u.tcp.port = 0xFFFF; 1339 exp->mask.dst.u.tcp.port = htons(0xFFFF);
1340 exp->mask.dst.protonum = 0xFF; 1340 exp->mask.dst.protonum = 0xFF;
1341 exp->flags = 0; 1341 exp->flags = 0;
1342 exp->expectfn = ip_conntrack_ras_expect; 1342 exp->expectfn = ip_conntrack_ras_expect;
@@ -1477,7 +1477,7 @@ static int process_arq(struct sk_buff **pskb, struct ip_conntrack *ct,
1477{ 1477{
1478 struct ip_ct_h323_master *info = &ct->help.ct_h323_info; 1478 struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
1479 int dir = CTINFO2DIR(ctinfo); 1479 int dir = CTINFO2DIR(ctinfo);
1480 u_int32_t ip; 1480 __be32 ip;
1481 u_int16_t port; 1481 u_int16_t port;
1482 1482
1483 DEBUGP("ip_ct_ras: ARQ\n"); 1483 DEBUGP("ip_ct_ras: ARQ\n");
@@ -1513,7 +1513,7 @@ static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct,
1513{ 1513{
1514 int dir = CTINFO2DIR(ctinfo); 1514 int dir = CTINFO2DIR(ctinfo);
1515 int ret = 0; 1515 int ret = 0;
1516 u_int32_t ip; 1516 __be32 ip;
1517 u_int16_t port; 1517 u_int16_t port;
1518 struct ip_conntrack_expect *exp; 1518 struct ip_conntrack_expect *exp;
1519 1519
@@ -1538,10 +1538,10 @@ static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct,
1538 exp->tuple.dst.ip = ip; 1538 exp->tuple.dst.ip = ip;
1539 exp->tuple.dst.u.tcp.port = htons(port); 1539 exp->tuple.dst.u.tcp.port = htons(port);
1540 exp->tuple.dst.protonum = IPPROTO_TCP; 1540 exp->tuple.dst.protonum = IPPROTO_TCP;
1541 exp->mask.src.ip = 0xFFFFFFFF; 1541 exp->mask.src.ip = htonl(0xFFFFFFFF);
1542 exp->mask.src.u.tcp.port = 0; 1542 exp->mask.src.u.tcp.port = 0;
1543 exp->mask.dst.ip = 0xFFFFFFFF; 1543 exp->mask.dst.ip = htonl(0xFFFFFFFF);
1544 exp->mask.dst.u.tcp.port = 0xFFFF; 1544 exp->mask.dst.u.tcp.port = htons(0xFFFF);
1545 exp->mask.dst.protonum = 0xFF; 1545 exp->mask.dst.protonum = 0xFF;
1546 exp->flags = IP_CT_EXPECT_PERMANENT; 1546 exp->flags = IP_CT_EXPECT_PERMANENT;
1547 exp->expectfn = ip_conntrack_q931_expect; 1547 exp->expectfn = ip_conntrack_q931_expect;
@@ -1581,7 +1581,7 @@ static int process_lcf(struct sk_buff **pskb, struct ip_conntrack *ct,
1581{ 1581{
1582 int dir = CTINFO2DIR(ctinfo); 1582 int dir = CTINFO2DIR(ctinfo);
1583 int ret = 0; 1583 int ret = 0;
1584 u_int32_t ip; 1584 __be32 ip;
1585 u_int16_t port; 1585 u_int16_t port;
1586 struct ip_conntrack_expect *exp = NULL; 1586 struct ip_conntrack_expect *exp = NULL;
1587 1587
@@ -1598,10 +1598,10 @@ static int process_lcf(struct sk_buff **pskb, struct ip_conntrack *ct,
1598 exp->tuple.dst.ip = ip; 1598 exp->tuple.dst.ip = ip;
1599 exp->tuple.dst.u.tcp.port = htons(port); 1599 exp->tuple.dst.u.tcp.port = htons(port);
1600 exp->tuple.dst.protonum = IPPROTO_TCP; 1600 exp->tuple.dst.protonum = IPPROTO_TCP;
1601 exp->mask.src.ip = 0xFFFFFFFF; 1601 exp->mask.src.ip = htonl(0xFFFFFFFF);
1602 exp->mask.src.u.tcp.port = 0; 1602 exp->mask.src.u.tcp.port = 0;
1603 exp->mask.dst.ip = 0xFFFFFFFF; 1603 exp->mask.dst.ip = htonl(0xFFFFFFFF);
1604 exp->mask.dst.u.tcp.port = 0xFFFF; 1604 exp->mask.dst.u.tcp.port = htons(0xFFFF);
1605 exp->mask.dst.protonum = 0xFF; 1605 exp->mask.dst.protonum = 0xFF;
1606 exp->flags = IP_CT_EXPECT_PERMANENT; 1606 exp->flags = IP_CT_EXPECT_PERMANENT;
1607 exp->expectfn = ip_conntrack_q931_expect; 1607 exp->expectfn = ip_conntrack_q931_expect;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index fb0aee691721..a2af5e0c7f99 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -242,10 +242,10 @@ exp_gre(struct ip_conntrack *ct,
242 exp_orig->tuple.dst.u.gre.key = callid; 242 exp_orig->tuple.dst.u.gre.key = callid;
243 exp_orig->tuple.dst.protonum = IPPROTO_GRE; 243 exp_orig->tuple.dst.protonum = IPPROTO_GRE;
244 244
245 exp_orig->mask.src.ip = 0xffffffff; 245 exp_orig->mask.src.ip = htonl(0xffffffff);
246 exp_orig->mask.src.u.all = 0; 246 exp_orig->mask.src.u.all = 0;
247 exp_orig->mask.dst.u.gre.key = htons(0xffff); 247 exp_orig->mask.dst.u.gre.key = htons(0xffff);
248 exp_orig->mask.dst.ip = 0xffffffff; 248 exp_orig->mask.dst.ip = htonl(0xffffffff);
249 exp_orig->mask.dst.protonum = 0xff; 249 exp_orig->mask.dst.protonum = 0xff;
250 250
251 exp_orig->master = ct; 251 exp_orig->master = ct;
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
index 44889075f3b2..75f7c3db1619 100644
--- a/net/ipv4/netfilter/ip_conntrack_irc.c
+++ b/net/ipv4/netfilter/ip_conntrack_irc.c
@@ -218,7 +218,8 @@ static int help(struct sk_buff **pskb,
218 IPPROTO_TCP }}); 218 IPPROTO_TCP }});
219 exp->mask = ((struct ip_conntrack_tuple) 219 exp->mask = ((struct ip_conntrack_tuple)
220 { { 0, { 0 } }, 220 { { 0, { 0 } },
221 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); 221 { htonl(0xFFFFFFFF),
222 { .tcp = { htons(0xFFFF) } }, 0xFF }});
222 exp->expectfn = NULL; 223 exp->expectfn = NULL;
223 exp->flags = 0; 224 exp->flags = 0;
224 if (ip_nat_irc_hook) 225 if (ip_nat_irc_hook)
@@ -266,7 +267,7 @@ static int __init ip_conntrack_irc_init(void)
266 hlpr = &irc_helpers[i]; 267 hlpr = &irc_helpers[i];
267 hlpr->tuple.src.u.tcp.port = htons(ports[i]); 268 hlpr->tuple.src.u.tcp.port = htons(ports[i]);
268 hlpr->tuple.dst.protonum = IPPROTO_TCP; 269 hlpr->tuple.dst.protonum = IPPROTO_TCP;
269 hlpr->mask.src.u.tcp.port = 0xFFFF; 270 hlpr->mask.src.u.tcp.port = htons(0xFFFF);
270 hlpr->mask.dst.protonum = 0xFF; 271 hlpr->mask.dst.protonum = 0xFF;
271 hlpr->max_expected = max_dcc_channels; 272 hlpr->max_expected = max_dcc_channels;
272 hlpr->timeout = dcc_timeout; 273 hlpr->timeout = dcc_timeout;
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
index 3d0b438783db..a1d6a89f64aa 100644
--- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -48,7 +48,7 @@ static int help(struct sk_buff **pskb,
48 struct iphdr *iph = (*pskb)->nh.iph; 48 struct iphdr *iph = (*pskb)->nh.iph;
49 struct rtable *rt = (struct rtable *)(*pskb)->dst; 49 struct rtable *rt = (struct rtable *)(*pskb)->dst;
50 struct in_device *in_dev; 50 struct in_device *in_dev;
51 u_int32_t mask = 0; 51 __be32 mask = 0;
52 52
53 /* we're only interested in locally generated packets */ 53 /* we're only interested in locally generated packets */
54 if ((*pskb)->sk == NULL) 54 if ((*pskb)->sk == NULL)
@@ -78,12 +78,12 @@ static int help(struct sk_buff **pskb,
78 goto out; 78 goto out;
79 79
80 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 80 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
81 exp->tuple.src.u.udp.port = ntohs(NMBD_PORT); 81 exp->tuple.src.u.udp.port = htons(NMBD_PORT);
82 82
83 exp->mask.src.ip = mask; 83 exp->mask.src.ip = mask;
84 exp->mask.src.u.udp.port = 0xFFFF; 84 exp->mask.src.u.udp.port = htons(0xFFFF);
85 exp->mask.dst.ip = 0xFFFFFFFF; 85 exp->mask.dst.ip = htonl(0xFFFFFFFF);
86 exp->mask.dst.u.udp.port = 0xFFFF; 86 exp->mask.dst.u.udp.port = htons(0xFFFF);
87 exp->mask.dst.protonum = 0xFF; 87 exp->mask.dst.protonum = 0xFF;
88 88
89 exp->expectfn = NULL; 89 exp->expectfn = NULL;
@@ -115,7 +115,7 @@ static struct ip_conntrack_helper helper = {
115 .src = { 115 .src = {
116 .u = { 116 .u = {
117 .udp = { 117 .udp = {
118 .port = 0xFFFF, 118 .port = __constant_htons(0xFFFF),
119 } 119 }
120 } 120 }
121 }, 121 },
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 52eddea27e93..262d0d44ec1b 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -44,13 +44,6 @@ MODULE_LICENSE("GPL");
44 44
45static char __initdata version[] = "0.90"; 45static char __initdata version[] = "0.90";
46 46
47#if 0
48#define DEBUGP printk
49#else
50#define DEBUGP(format, args...)
51#endif
52
53
54static inline int 47static inline int
55ctnetlink_dump_tuples_proto(struct sk_buff *skb, 48ctnetlink_dump_tuples_proto(struct sk_buff *skb,
56 const struct ip_conntrack_tuple *tuple, 49 const struct ip_conntrack_tuple *tuple,
@@ -78,8 +71,8 @@ ctnetlink_dump_tuples_ip(struct sk_buff *skb,
78{ 71{
79 struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP); 72 struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP);
80 73
81 NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(u_int32_t), &tuple->src.ip); 74 NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip);
82 NFA_PUT(skb, CTA_IP_V4_DST, sizeof(u_int32_t), &tuple->dst.ip); 75 NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip);
83 76
84 NFA_NEST_END(skb, nest_parms); 77 NFA_NEST_END(skb, nest_parms);
85 78
@@ -110,7 +103,7 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
110static inline int 103static inline int
111ctnetlink_dump_status(struct sk_buff *skb, const struct ip_conntrack *ct) 104ctnetlink_dump_status(struct sk_buff *skb, const struct ip_conntrack *ct)
112{ 105{
113 u_int32_t status = htonl((u_int32_t) ct->status); 106 __be32 status = htonl((u_int32_t) ct->status);
114 NFA_PUT(skb, CTA_STATUS, sizeof(status), &status); 107 NFA_PUT(skb, CTA_STATUS, sizeof(status), &status);
115 return 0; 108 return 0;
116 109
@@ -122,7 +115,7 @@ static inline int
122ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct) 115ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct)
123{ 116{
124 long timeout_l = ct->timeout.expires - jiffies; 117 long timeout_l = ct->timeout.expires - jiffies;
125 u_int32_t timeout; 118 __be32 timeout;
126 119
127 if (timeout_l < 0) 120 if (timeout_l < 0)
128 timeout = 0; 121 timeout = 0;
@@ -192,13 +185,13 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
192{ 185{
193 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; 186 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
194 struct nfattr *nest_count = NFA_NEST(skb, type); 187 struct nfattr *nest_count = NFA_NEST(skb, type);
195 u_int32_t tmp; 188 __be32 tmp;
196 189
197 tmp = htonl(ct->counters[dir].packets); 190 tmp = htonl(ct->counters[dir].packets);
198 NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); 191 NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(__be32), &tmp);
199 192
200 tmp = htonl(ct->counters[dir].bytes); 193 tmp = htonl(ct->counters[dir].bytes);
201 NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp); 194 NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(__be32), &tmp);
202 195
203 NFA_NEST_END(skb, nest_count); 196 NFA_NEST_END(skb, nest_count);
204 197
@@ -215,9 +208,9 @@ nfattr_failure:
215static inline int 208static inline int
216ctnetlink_dump_mark(struct sk_buff *skb, const struct ip_conntrack *ct) 209ctnetlink_dump_mark(struct sk_buff *skb, const struct ip_conntrack *ct)
217{ 210{
218 u_int32_t mark = htonl(ct->mark); 211 __be32 mark = htonl(ct->mark);
219 212
220 NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark); 213 NFA_PUT(skb, CTA_MARK, sizeof(__be32), &mark);
221 return 0; 214 return 0;
222 215
223nfattr_failure: 216nfattr_failure:
@@ -230,8 +223,8 @@ nfattr_failure:
230static inline int 223static inline int
231ctnetlink_dump_id(struct sk_buff *skb, const struct ip_conntrack *ct) 224ctnetlink_dump_id(struct sk_buff *skb, const struct ip_conntrack *ct)
232{ 225{
233 u_int32_t id = htonl(ct->id); 226 __be32 id = htonl(ct->id);
234 NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id); 227 NFA_PUT(skb, CTA_ID, sizeof(__be32), &id);
235 return 0; 228 return 0;
236 229
237nfattr_failure: 230nfattr_failure:
@@ -241,9 +234,9 @@ nfattr_failure:
241static inline int 234static inline int
242ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct) 235ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct)
243{ 236{
244 u_int32_t use = htonl(atomic_read(&ct->ct_general.use)); 237 __be32 use = htonl(atomic_read(&ct->ct_general.use));
245 238
246 NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); 239 NFA_PUT(skb, CTA_USE, sizeof(__be32), &use);
247 return 0; 240 return 0;
248 241
249nfattr_failure: 242nfattr_failure:
@@ -398,7 +391,6 @@ nfattr_failure:
398 391
399static int ctnetlink_done(struct netlink_callback *cb) 392static int ctnetlink_done(struct netlink_callback *cb)
400{ 393{
401 DEBUGP("entered %s\n", __FUNCTION__);
402 if (cb->args[1]) 394 if (cb->args[1])
403 ip_conntrack_put((struct ip_conntrack *)cb->args[1]); 395 ip_conntrack_put((struct ip_conntrack *)cb->args[1]);
404 return 0; 396 return 0;
@@ -411,9 +403,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
411 struct ip_conntrack_tuple_hash *h; 403 struct ip_conntrack_tuple_hash *h;
412 struct list_head *i; 404 struct list_head *i;
413 405
414 DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__,
415 cb->args[0], *id);
416
417 read_lock_bh(&ip_conntrack_lock); 406 read_lock_bh(&ip_conntrack_lock);
418 last = (struct ip_conntrack *)cb->args[1]; 407 last = (struct ip_conntrack *)cb->args[1];
419 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { 408 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
@@ -452,13 +441,12 @@ out:
452 if (last) 441 if (last)
453 ip_conntrack_put(last); 442 ip_conntrack_put(last);
454 443
455 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
456 return skb->len; 444 return skb->len;
457} 445}
458 446
459static const size_t cta_min_ip[CTA_IP_MAX] = { 447static const size_t cta_min_ip[CTA_IP_MAX] = {
460 [CTA_IP_V4_SRC-1] = sizeof(u_int32_t), 448 [CTA_IP_V4_SRC-1] = sizeof(__be32),
461 [CTA_IP_V4_DST-1] = sizeof(u_int32_t), 449 [CTA_IP_V4_DST-1] = sizeof(__be32),
462}; 450};
463 451
464static inline int 452static inline int
@@ -466,8 +454,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
466{ 454{
467 struct nfattr *tb[CTA_IP_MAX]; 455 struct nfattr *tb[CTA_IP_MAX];
468 456
469 DEBUGP("entered %s\n", __FUNCTION__);
470
471 nfattr_parse_nested(tb, CTA_IP_MAX, attr); 457 nfattr_parse_nested(tb, CTA_IP_MAX, attr);
472 458
473 if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) 459 if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
@@ -475,13 +461,11 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
475 461
476 if (!tb[CTA_IP_V4_SRC-1]) 462 if (!tb[CTA_IP_V4_SRC-1])
477 return -EINVAL; 463 return -EINVAL;
478 tuple->src.ip = *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_SRC-1]); 464 tuple->src.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]);
479 465
480 if (!tb[CTA_IP_V4_DST-1]) 466 if (!tb[CTA_IP_V4_DST-1])
481 return -EINVAL; 467 return -EINVAL;
482 tuple->dst.ip = *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_DST-1]); 468 tuple->dst.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
483
484 DEBUGP("leaving\n");
485 469
486 return 0; 470 return 0;
487} 471}
@@ -503,8 +487,6 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
503 struct ip_conntrack_protocol *proto; 487 struct ip_conntrack_protocol *proto;
504 int ret = 0; 488 int ret = 0;
505 489
506 DEBUGP("entered %s\n", __FUNCTION__);
507
508 nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); 490 nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
509 491
510 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) 492 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
@@ -531,8 +513,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
531 struct nfattr *tb[CTA_TUPLE_MAX]; 513 struct nfattr *tb[CTA_TUPLE_MAX];
532 int err; 514 int err;
533 515
534 DEBUGP("entered %s\n", __FUNCTION__);
535
536 memset(tuple, 0, sizeof(*tuple)); 516 memset(tuple, 0, sizeof(*tuple));
537 517
538 nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]); 518 nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
@@ -557,10 +537,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
557 else 537 else
558 tuple->dst.dir = IP_CT_DIR_ORIGINAL; 538 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
559 539
560 DUMP_TUPLE(tuple);
561
562 DEBUGP("leaving\n");
563
564 return 0; 540 return 0;
565} 541}
566 542
@@ -577,8 +553,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
577 struct nfattr *tb[CTA_PROTONAT_MAX]; 553 struct nfattr *tb[CTA_PROTONAT_MAX];
578 struct ip_nat_protocol *npt; 554 struct ip_nat_protocol *npt;
579 555
580 DEBUGP("entered %s\n", __FUNCTION__);
581
582 nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); 556 nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
583 557
584 if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) 558 if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
@@ -597,13 +571,12 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
597 571
598 ip_nat_proto_put(npt); 572 ip_nat_proto_put(npt);
599 573
600 DEBUGP("leaving\n");
601 return 0; 574 return 0;
602} 575}
603 576
604static const size_t cta_min_nat[CTA_NAT_MAX] = { 577static const size_t cta_min_nat[CTA_NAT_MAX] = {
605 [CTA_NAT_MINIP-1] = sizeof(u_int32_t), 578 [CTA_NAT_MINIP-1] = sizeof(__be32),
606 [CTA_NAT_MAXIP-1] = sizeof(u_int32_t), 579 [CTA_NAT_MAXIP-1] = sizeof(__be32),
607}; 580};
608 581
609static inline int 582static inline int
@@ -613,8 +586,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
613 struct nfattr *tb[CTA_NAT_MAX]; 586 struct nfattr *tb[CTA_NAT_MAX];
614 int err; 587 int err;
615 588
616 DEBUGP("entered %s\n", __FUNCTION__);
617
618 memset(range, 0, sizeof(*range)); 589 memset(range, 0, sizeof(*range));
619 590
620 nfattr_parse_nested(tb, CTA_NAT_MAX, nat); 591 nfattr_parse_nested(tb, CTA_NAT_MAX, nat);
@@ -623,12 +594,12 @@ ctnetlink_parse_nat(struct nfattr *nat,
623 return -EINVAL; 594 return -EINVAL;
624 595
625 if (tb[CTA_NAT_MINIP-1]) 596 if (tb[CTA_NAT_MINIP-1])
626 range->min_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MINIP-1]); 597 range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
627 598
628 if (!tb[CTA_NAT_MAXIP-1]) 599 if (!tb[CTA_NAT_MAXIP-1])
629 range->max_ip = range->min_ip; 600 range->max_ip = range->min_ip;
630 else 601 else
631 range->max_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MAXIP-1]); 602 range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]);
632 603
633 if (range->min_ip) 604 if (range->min_ip)
634 range->flags |= IP_NAT_RANGE_MAP_IPS; 605 range->flags |= IP_NAT_RANGE_MAP_IPS;
@@ -640,7 +611,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
640 if (err < 0) 611 if (err < 0)
641 return err; 612 return err;
642 613
643 DEBUGP("leaving\n");
644 return 0; 614 return 0;
645} 615}
646#endif 616#endif
@@ -650,8 +620,6 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
650{ 620{
651 struct nfattr *tb[CTA_HELP_MAX]; 621 struct nfattr *tb[CTA_HELP_MAX];
652 622
653 DEBUGP("entered %s\n", __FUNCTION__);
654
655 nfattr_parse_nested(tb, CTA_HELP_MAX, attr); 623 nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
656 624
657 if (!tb[CTA_HELP_NAME-1]) 625 if (!tb[CTA_HELP_NAME-1])
@@ -663,11 +631,11 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
663} 631}
664 632
665static const size_t cta_min[CTA_MAX] = { 633static const size_t cta_min[CTA_MAX] = {
666 [CTA_STATUS-1] = sizeof(u_int32_t), 634 [CTA_STATUS-1] = sizeof(__be32),
667 [CTA_TIMEOUT-1] = sizeof(u_int32_t), 635 [CTA_TIMEOUT-1] = sizeof(__be32),
668 [CTA_MARK-1] = sizeof(u_int32_t), 636 [CTA_MARK-1] = sizeof(__be32),
669 [CTA_USE-1] = sizeof(u_int32_t), 637 [CTA_USE-1] = sizeof(__be32),
670 [CTA_ID-1] = sizeof(u_int32_t) 638 [CTA_ID-1] = sizeof(__be32)
671}; 639};
672 640
673static int 641static int
@@ -679,8 +647,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
679 struct ip_conntrack *ct; 647 struct ip_conntrack *ct;
680 int err = 0; 648 int err = 0;
681 649
682 DEBUGP("entered %s\n", __FUNCTION__);
683
684 if (nfattr_bad_size(cda, CTA_MAX, cta_min)) 650 if (nfattr_bad_size(cda, CTA_MAX, cta_min))
685 return -EINVAL; 651 return -EINVAL;
686 652
@@ -698,15 +664,13 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
698 return err; 664 return err;
699 665
700 h = ip_conntrack_find_get(&tuple, NULL); 666 h = ip_conntrack_find_get(&tuple, NULL);
701 if (!h) { 667 if (!h)
702 DEBUGP("tuple not found in conntrack hash\n");
703 return -ENOENT; 668 return -ENOENT;
704 }
705 669
706 ct = tuplehash_to_ctrack(h); 670 ct = tuplehash_to_ctrack(h);
707 671
708 if (cda[CTA_ID-1]) { 672 if (cda[CTA_ID-1]) {
709 u_int32_t id = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_ID-1])); 673 u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1]));
710 if (ct->id != id) { 674 if (ct->id != id) {
711 ip_conntrack_put(ct); 675 ip_conntrack_put(ct);
712 return -ENOENT; 676 return -ENOENT;
@@ -716,7 +680,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
716 ct->timeout.function((unsigned long)ct); 680 ct->timeout.function((unsigned long)ct);
717 681
718 ip_conntrack_put(ct); 682 ip_conntrack_put(ct);
719 DEBUGP("leaving\n");
720 683
721 return 0; 684 return 0;
722} 685}
@@ -731,8 +694,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
731 struct sk_buff *skb2 = NULL; 694 struct sk_buff *skb2 = NULL;
732 int err = 0; 695 int err = 0;
733 696
734 DEBUGP("entered %s\n", __FUNCTION__);
735
736 if (nlh->nlmsg_flags & NLM_F_DUMP) { 697 if (nlh->nlmsg_flags & NLM_F_DUMP) {
737 struct nfgenmsg *msg = NLMSG_DATA(nlh); 698 struct nfgenmsg *msg = NLMSG_DATA(nlh);
738 u32 rlen; 699 u32 rlen;
@@ -770,11 +731,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
770 return err; 731 return err;
771 732
772 h = ip_conntrack_find_get(&tuple, NULL); 733 h = ip_conntrack_find_get(&tuple, NULL);
773 if (!h) { 734 if (!h)
774 DEBUGP("tuple not found in conntrack hash");
775 return -ENOENT; 735 return -ENOENT;
776 } 736
777 DEBUGP("tuple found\n");
778 ct = tuplehash_to_ctrack(h); 737 ct = tuplehash_to_ctrack(h);
779 738
780 err = -ENOMEM; 739 err = -ENOMEM;
@@ -795,7 +754,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
795 if (err < 0) 754 if (err < 0)
796 goto out; 755 goto out;
797 756
798 DEBUGP("leaving\n");
799 return 0; 757 return 0;
800 758
801free: 759free:
@@ -808,7 +766,7 @@ static inline int
808ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[]) 766ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
809{ 767{
810 unsigned long d; 768 unsigned long d;
811 unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1])); 769 unsigned status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1]));
812 d = ct->status ^ status; 770 d = ct->status ^ status;
813 771
814 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) 772 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
@@ -866,8 +824,6 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
866 char *helpname; 824 char *helpname;
867 int err; 825 int err;
868 826
869 DEBUGP("entered %s\n", __FUNCTION__);
870
871 /* don't change helper of sibling connections */ 827 /* don't change helper of sibling connections */
872 if (ct->master) 828 if (ct->master)
873 return -EINVAL; 829 return -EINVAL;
@@ -903,7 +859,7 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
903static inline int 859static inline int
904ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) 860ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
905{ 861{
906 u_int32_t timeout = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1])); 862 u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
907 863
908 if (!del_timer(&ct->timeout)) 864 if (!del_timer(&ct->timeout))
909 return -ETIME; 865 return -ETIME;
@@ -938,8 +894,6 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
938{ 894{
939 int err; 895 int err;
940 896
941 DEBUGP("entered %s\n", __FUNCTION__);
942
943 if (cda[CTA_HELP-1]) { 897 if (cda[CTA_HELP-1]) {
944 err = ctnetlink_change_helper(ct, cda); 898 err = ctnetlink_change_helper(ct, cda);
945 if (err < 0) 899 if (err < 0)
@@ -966,10 +920,9 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
966 920
967#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 921#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
968 if (cda[CTA_MARK-1]) 922 if (cda[CTA_MARK-1])
969 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); 923 ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
970#endif 924#endif
971 925
972 DEBUGP("all done\n");
973 return 0; 926 return 0;
974} 927}
975 928
@@ -981,15 +934,13 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
981 struct ip_conntrack *ct; 934 struct ip_conntrack *ct;
982 int err = -EINVAL; 935 int err = -EINVAL;
983 936
984 DEBUGP("entered %s\n", __FUNCTION__);
985
986 ct = ip_conntrack_alloc(otuple, rtuple); 937 ct = ip_conntrack_alloc(otuple, rtuple);
987 if (ct == NULL || IS_ERR(ct)) 938 if (ct == NULL || IS_ERR(ct))
988 return -ENOMEM; 939 return -ENOMEM;
989 940
990 if (!cda[CTA_TIMEOUT-1]) 941 if (!cda[CTA_TIMEOUT-1])
991 goto err; 942 goto err;
992 ct->timeout.expires = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1])); 943 ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
993 944
994 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 945 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
995 ct->status |= IPS_CONFIRMED; 946 ct->status |= IPS_CONFIRMED;
@@ -1006,7 +957,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
1006 957
1007#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 958#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
1008 if (cda[CTA_MARK-1]) 959 if (cda[CTA_MARK-1])
1009 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); 960 ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
1010#endif 961#endif
1011 962
1012 ct->helper = ip_conntrack_helper_find_get(rtuple); 963 ct->helper = ip_conntrack_helper_find_get(rtuple);
@@ -1017,7 +968,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
1017 if (ct->helper) 968 if (ct->helper)
1018 ip_conntrack_helper_put(ct->helper); 969 ip_conntrack_helper_put(ct->helper);
1019 970
1020 DEBUGP("conntrack with id %u inserted\n", ct->id);
1021 return 0; 971 return 0;
1022 972
1023err: 973err:
@@ -1033,8 +983,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1033 struct ip_conntrack_tuple_hash *h = NULL; 983 struct ip_conntrack_tuple_hash *h = NULL;
1034 int err = 0; 984 int err = 0;
1035 985
1036 DEBUGP("entered %s\n", __FUNCTION__);
1037
1038 if (nfattr_bad_size(cda, CTA_MAX, cta_min)) 986 if (nfattr_bad_size(cda, CTA_MAX, cta_min))
1039 return -EINVAL; 987 return -EINVAL;
1040 988
@@ -1058,7 +1006,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1058 1006
1059 if (h == NULL) { 1007 if (h == NULL) {
1060 write_unlock_bh(&ip_conntrack_lock); 1008 write_unlock_bh(&ip_conntrack_lock);
1061 DEBUGP("no such conntrack, create new\n");
1062 err = -ENOENT; 1009 err = -ENOENT;
1063 if (nlh->nlmsg_flags & NLM_F_CREATE) 1010 if (nlh->nlmsg_flags & NLM_F_CREATE)
1064 err = ctnetlink_create_conntrack(cda, &otuple, &rtuple); 1011 err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
@@ -1074,7 +1021,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1074 1021
1075 /* We manipulate the conntrack inside the global conntrack table lock, 1022 /* We manipulate the conntrack inside the global conntrack table lock,
1076 * so there's no need to increase the refcount */ 1023 * so there's no need to increase the refcount */
1077 DEBUGP("conntrack found\n");
1078 err = -EEXIST; 1024 err = -EEXIST;
1079 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) 1025 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1080 err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda); 1026 err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda);
@@ -1138,8 +1084,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1138 const struct ip_conntrack_expect *exp) 1084 const struct ip_conntrack_expect *exp)
1139{ 1085{
1140 struct ip_conntrack *master = exp->master; 1086 struct ip_conntrack *master = exp->master;
1141 u_int32_t timeout = htonl((exp->timeout.expires - jiffies) / HZ); 1087 __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ);
1142 u_int32_t id = htonl(exp->id); 1088 __be32 id = htonl(exp->id);
1143 1089
1144 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) 1090 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
1145 goto nfattr_failure; 1091 goto nfattr_failure;
@@ -1150,8 +1096,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1150 CTA_EXPECT_MASTER) < 0) 1096 CTA_EXPECT_MASTER) < 0)
1151 goto nfattr_failure; 1097 goto nfattr_failure;
1152 1098
1153 NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout); 1099 NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout);
1154 NFA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id); 1100 NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id);
1155 1101
1156 return 0; 1102 return 0;
1157 1103
@@ -1249,8 +1195,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1249 struct list_head *i; 1195 struct list_head *i;
1250 u_int32_t *id = (u_int32_t *) &cb->args[0]; 1196 u_int32_t *id = (u_int32_t *) &cb->args[0];
1251 1197
1252 DEBUGP("entered %s, last id=%llu\n", __FUNCTION__, *id);
1253
1254 read_lock_bh(&ip_conntrack_lock); 1198 read_lock_bh(&ip_conntrack_lock);
1255 list_for_each_prev(i, &ip_conntrack_expect_list) { 1199 list_for_each_prev(i, &ip_conntrack_expect_list) {
1256 exp = (struct ip_conntrack_expect *) i; 1200 exp = (struct ip_conntrack_expect *) i;
@@ -1266,14 +1210,12 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1266out: 1210out:
1267 read_unlock_bh(&ip_conntrack_lock); 1211 read_unlock_bh(&ip_conntrack_lock);
1268 1212
1269 DEBUGP("leaving, last id=%llu\n", *id);
1270
1271 return skb->len; 1213 return skb->len;
1272} 1214}
1273 1215
1274static const size_t cta_min_exp[CTA_EXPECT_MAX] = { 1216static const size_t cta_min_exp[CTA_EXPECT_MAX] = {
1275 [CTA_EXPECT_TIMEOUT-1] = sizeof(u_int32_t), 1217 [CTA_EXPECT_TIMEOUT-1] = sizeof(__be32),
1276 [CTA_EXPECT_ID-1] = sizeof(u_int32_t) 1218 [CTA_EXPECT_ID-1] = sizeof(__be32)
1277}; 1219};
1278 1220
1279static int 1221static int
@@ -1285,8 +1227,6 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1285 struct sk_buff *skb2; 1227 struct sk_buff *skb2;
1286 int err = 0; 1228 int err = 0;
1287 1229
1288 DEBUGP("entered %s\n", __FUNCTION__);
1289
1290 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) 1230 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
1291 return -EINVAL; 1231 return -EINVAL;
1292 1232
@@ -1321,7 +1261,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1321 return -ENOENT; 1261 return -ENOENT;
1322 1262
1323 if (cda[CTA_EXPECT_ID-1]) { 1263 if (cda[CTA_EXPECT_ID-1]) {
1324 u_int32_t id = *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]); 1264 __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
1325 if (exp->id != ntohl(id)) { 1265 if (exp->id != ntohl(id)) {
1326 ip_conntrack_expect_put(exp); 1266 ip_conntrack_expect_put(exp);
1327 return -ENOENT; 1267 return -ENOENT;
@@ -1375,8 +1315,8 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1375 return -ENOENT; 1315 return -ENOENT;
1376 1316
1377 if (cda[CTA_EXPECT_ID-1]) { 1317 if (cda[CTA_EXPECT_ID-1]) {
1378 u_int32_t id = 1318 __be32 id =
1379 *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]); 1319 *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
1380 if (exp->id != ntohl(id)) { 1320 if (exp->id != ntohl(id)) {
1381 ip_conntrack_expect_put(exp); 1321 ip_conntrack_expect_put(exp);
1382 return -ENOENT; 1322 return -ENOENT;
@@ -1437,8 +1377,6 @@ ctnetlink_create_expect(struct nfattr *cda[])
1437 struct ip_conntrack *ct; 1377 struct ip_conntrack *ct;
1438 int err = 0; 1378 int err = 0;
1439 1379
1440 DEBUGP("entered %s\n", __FUNCTION__);
1441
1442 /* caller guarantees that those three CTA_EXPECT_* exist */ 1380 /* caller guarantees that those three CTA_EXPECT_* exist */
1443 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE); 1381 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
1444 if (err < 0) 1382 if (err < 0)
@@ -1490,8 +1428,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1490 struct ip_conntrack_expect *exp; 1428 struct ip_conntrack_expect *exp;
1491 int err = 0; 1429 int err = 0;
1492 1430
1493 DEBUGP("entered %s\n", __FUNCTION__);
1494
1495 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) 1431 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
1496 return -EINVAL; 1432 return -EINVAL;
1497 1433
@@ -1520,8 +1456,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1520 err = ctnetlink_change_expect(exp, cda); 1456 err = ctnetlink_change_expect(exp, cda);
1521 write_unlock_bh(&ip_conntrack_lock); 1457 write_unlock_bh(&ip_conntrack_lock);
1522 1458
1523 DEBUGP("leaving\n");
1524
1525 return err; 1459 return err;
1526} 1460}
1527 1461
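
The ctnetlink hunks above all apply one pattern: values carried inside netlink attributes are big-endian on the wire, so they are declared __be32 and converted with htonl()/ntohl() exactly once, at the attribute boundary, where the kernel's sparse checker can verify the annotation. A minimal standalone sketch of that pattern in plain C, using a toy attribute struct rather than the kernel's nfattr/NFA_PUT API (the names here are illustrative only):

#include <stdint.h>
#include <arpa/inet.h>

/* Toy stand-in for a netlink attribute carrying one 32-bit value;
 * in the kernel the payload field would be typed __be32. */
struct toy_attr {
	uint16_t len;
	uint16_t type;
	uint32_t value;		/* stored in network byte order */
};

static void toy_put_u32(struct toy_attr *a, uint16_t type, uint32_t host_val)
{
	a->len = sizeof(*a);
	a->type = type;
	a->value = htonl(host_val);	/* convert once, when building the message */
}

static uint32_t toy_get_u32(const struct toy_attr *a)
{
	return ntohl(a->value);		/* convert back for host-order comparisons */
}
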
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 09c40ebe3345..295b6fa340db 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -261,7 +261,7 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
 static int icmp_tuple_to_nfattr(struct sk_buff *skb,
  const struct ip_conntrack_tuple *t)
 {
- NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(u_int16_t),
+ NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(__be16),
  &t->src.u.icmp.id);
  NFA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t),
  &t->dst.u.icmp.type);
@@ -287,7 +287,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
  tuple->dst.u.icmp.code =
  *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
  tuple->src.u.icmp.id =
- *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
+ *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
 
  if (tuple->dst.u.icmp.type >= sizeof(invmap)
  || !invmap[tuple->dst.u.icmp.type])
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index b908a4842e18..2443322e4128 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -210,7 +210,7 @@ static int sctp_print_conntrack(struct seq_file *s,
 for (offset = skb->nh.iph->ihl * 4 + sizeof(sctp_sctphdr_t), count = 0; \
  offset < skb->len && \
  (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \
- offset += (htons(sch->length) + 3) & ~3, count++)
+ offset += (ntohs(sch->length) + 3) & ~3, count++)
 
 /* Some validity checks to make sure the chunks are fine */
 static int do_basic_checks(struct ip_conntrack *conntrack,
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 03ae9a04cb37..06e4e8a6dd9f 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -519,8 +519,8 @@ static void tcp_sack(const struct sk_buff *skb,
 
  /* Fast path for timestamp-only option */
  if (length == TCPOLEN_TSTAMP_ALIGNED*4
- && *(__u32 *)ptr ==
- __constant_ntohl((TCPOPT_NOP << 24)
+ && *(__be32 *)ptr ==
+ __constant_htonl((TCPOPT_NOP << 24)
  | (TCPOPT_NOP << 16)
  | (TCPOPT_TIMESTAMP << 8)
  | TCPOLEN_TIMESTAMP))
@@ -551,7 +551,7 @@ static void tcp_sack(const struct sk_buff *skb,
  for (i = 0;
  i < (opsize - TCPOLEN_SACK_BASE);
  i += TCPOLEN_SACK_PERBLOCK) {
- tmp = ntohl(*((u_int32_t *)(ptr+i)+1));
+ tmp = ntohl(*((__be32 *)(ptr+i)+1));
 
  if (after(tmp, *sack))
  *sack = tmp;
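
The sctp and tcp hunks above fix the direction of the conversion (ntohs()/htonl() chosen to match whether data is coming off the wire or going onto it), so arithmetic is always done on host-order values and the __be16/__be32 annotations line up for sparse. A small standalone illustration of the SCTP chunk walk's rounding step; next_chunk_offset() is an invented helper name, not the kernel macro:

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

/* The chunk length arrives big-endian, so convert with ntohs() before
 * doing host-order arithmetic, then pad up to the next 4-byte boundary. */
static size_t next_chunk_offset(size_t offset, uint16_t wire_len)
{
	return offset + ((ntohs(wire_len) + 3u) & ~3u);
}
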
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
index 2893e9c74850..f4f75995a9e4 100644
--- a/net/ipv4/netfilter/ip_conntrack_sip.c
+++ b/net/ipv4/netfilter/ip_conntrack_sip.c
@@ -193,7 +193,7 @@ static int skp_digits_len(const char *dptr, const char *limit, int *shift)
 
 /* Simple ipaddr parser.. */
 static int parse_ipaddr(const char *cp, const char **endp,
- u_int32_t *ipaddr, const char *limit)
+ __be32 *ipaddr, const char *limit)
 {
  unsigned long int val;
  int i, digit = 0;
@@ -227,7 +227,7 @@ static int parse_ipaddr(const char *cp, const char **endp,
 static int epaddr_len(const char *dptr, const char *limit, int *shift)
 {
  const char *aux = dptr;
- u_int32_t ip;
+ __be32 ip;
 
  if (parse_ipaddr(dptr, &dptr, &ip, limit) < 0) {
  DEBUGP("ip: %s parse failed.!\n", dptr);
@@ -302,7 +302,7 @@ int ct_sip_get_info(const char *dptr, size_t dlen,
 static int set_expected_rtp(struct sk_buff **pskb,
  struct ip_conntrack *ct,
  enum ip_conntrack_info ctinfo,
- u_int32_t ipaddr, u_int16_t port,
+ __be32 ipaddr, u_int16_t port,
  const char *dptr)
 {
  struct ip_conntrack_expect *exp;
@@ -319,10 +319,10 @@ static int set_expected_rtp(struct sk_buff **pskb,
  exp->tuple.dst.u.udp.port = htons(port);
  exp->tuple.dst.protonum = IPPROTO_UDP;
 
- exp->mask.src.ip = 0xFFFFFFFF;
+ exp->mask.src.ip = htonl(0xFFFFFFFF);
  exp->mask.src.u.udp.port = 0;
- exp->mask.dst.ip = 0xFFFFFFFF;
- exp->mask.dst.u.udp.port = 0xFFFF;
+ exp->mask.dst.ip = htonl(0xFFFFFFFF);
+ exp->mask.dst.u.udp.port = htons(0xFFFF);
  exp->mask.dst.protonum = 0xFF;
 
  exp->expectfn = NULL;
@@ -349,7 +349,7 @@ static int sip_help(struct sk_buff **pskb,
  const char *dptr;
  int ret = NF_ACCEPT;
  int matchoff, matchlen;
- u_int32_t ipaddr;
+ __be32 ipaddr;
  u_int16_t port;
 
  /* No Data ? */
@@ -439,7 +439,7 @@ static int __init init(void)
 
  sip[i].tuple.dst.protonum = IPPROTO_UDP;
  sip[i].tuple.src.u.udp.port = htons(ports[i]);
- sip[i].mask.src.u.udp.port = 0xFFFF;
+ sip[i].mask.src.u.udp.port = htons(0xFFFF);
  sip[i].mask.dst.protonum = 0xFF;
  sip[i].max_expected = 2;
  sip[i].timeout = 3 * 60; /* 3 minutes */
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index 7e33d3bed5e3..fe0b634dd377 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -70,10 +70,10 @@ static int tftp_help(struct sk_buff **pskb,
  return NF_DROP;
 
  exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- exp->mask.src.ip = 0xffffffff;
+ exp->mask.src.ip = htonl(0xffffffff);
  exp->mask.src.u.udp.port = 0;
- exp->mask.dst.ip = 0xffffffff;
- exp->mask.dst.u.udp.port = 0xffff;
+ exp->mask.dst.ip = htonl(0xffffffff);
+ exp->mask.dst.u.udp.port = htons(0xffff);
  exp->mask.dst.protonum = 0xff;
  exp->expectfn = NULL;
  exp->flags = 0;
@@ -129,7 +129,7 @@ static int __init ip_conntrack_tftp_init(void)
  tftp[i].tuple.dst.protonum = IPPROTO_UDP;
  tftp[i].tuple.src.u.udp.port = htons(ports[i]);
  tftp[i].mask.dst.protonum = 0xFF;
- tftp[i].mask.src.u.udp.port = 0xFFFF;
+ tftp[i].mask.src.u.udp.port = htons(0xFFFF);
  tftp[i].max_expected = 1;
  tftp[i].timeout = 5 * 60; /* 5 minutes */
  tftp[i].me = THIS_MODULE;
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 71f3e09cbc84..4b6260a97408 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -82,7 +82,7 @@ static inline unsigned int
 hash_by_src(const struct ip_conntrack_tuple *tuple)
 {
  /* Original src, to ensure we map it consistently if poss. */
- return jhash_3words(tuple->src.ip, tuple->src.u.all,
+ return jhash_3words((__force u32)tuple->src.ip, tuple->src.u.all,
  tuple->dst.protonum, 0) % ip_nat_htable_size;
 }
 
@@ -190,7 +190,7 @@ find_best_ips_proto(struct ip_conntrack_tuple *tuple,
  const struct ip_conntrack *conntrack,
  enum ip_nat_manip_type maniptype)
 {
- u_int32_t *var_ipp;
+ __be32 *var_ipp;
  /* Host order */
  u_int32_t minip, maxip, j;
 
@@ -217,7 +217,7 @@ find_best_ips_proto(struct ip_conntrack_tuple *tuple,
  * like this), even across reboots. */
  minip = ntohl(range->min_ip);
  maxip = ntohl(range->max_ip);
- j = jhash_2words(tuple->src.ip, tuple->dst.ip, 0);
+ j = jhash_2words((__force u32)tuple->src.ip, (__force u32)tuple->dst.ip, 0);
  *var_ipp = htonl(minip + j % (maxip - minip + 1));
 }
 
@@ -534,9 +534,9 @@ int
 ip_nat_port_range_to_nfattr(struct sk_buff *skb,
  const struct ip_nat_range *range)
 {
- NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(u_int16_t),
+ NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
  &range->min.tcp.port);
- NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(u_int16_t),
+ NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
  &range->max.tcp.port);
 
  return 0;
@@ -555,7 +555,7 @@ ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
  if (tb[CTA_PROTONAT_PORT_MIN-1]) {
  ret = 1;
  range->min.tcp.port =
- *(u_int16_t *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
+ *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
  }
 
  if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
@@ -564,7 +564,7 @@ ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
  } else {
  ret = 1;
  range->max.tcp.port =
- *(u_int16_t *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
+ *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
  }
 
  return ret;
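
find_best_ips_proto() hashes the two big-endian addresses of a connection to pick a stable offset into [min_ip, max_ip]; the (__force u32) casts above only tell sparse that feeding raw big-endian words into jhash is intentional, since the hash needs consistent bits rather than any particular byte order. A simplified standalone version, with a trivial stand-in for the kernel's jhash_2words():

#include <stdint.h>
#include <arpa/inet.h>

/* Stand-in mixer, NOT the Jenkins hash; it only needs to be a
 * deterministic function of both inputs for this sketch. */
static uint32_t toy_hash_2words(uint32_t a, uint32_t b, uint32_t seed)
{
	return (a * 2654435761u) ^ (b + seed);
}

/* Pick an address in [min_ip, max_ip] (both big-endian, min <= max),
 * keyed by the flow so the same connection always maps to the same IP. */
static uint32_t pick_ip_in_range(uint32_t src_be, uint32_t dst_be,
				 uint32_t min_ip_be, uint32_t max_ip_be)
{
	uint32_t minip = ntohl(min_ip_be);
	uint32_t maxip = ntohl(max_ip_be);
	uint32_t j = toy_hash_2words(src_be, dst_be, 0);

	return htonl(minip + j % (maxip - minip + 1));
}
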
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c
index 3328fc5c5f50..a71c233d8112 100644
--- a/net/ipv4/netfilter/ip_nat_ftp.c
+++ b/net/ipv4/netfilter/ip_nat_ftp.c
@@ -34,7 +34,7 @@ MODULE_DESCRIPTION("ftp NAT helper");
 
 static int
 mangle_rfc959_packet(struct sk_buff **pskb,
- u_int32_t newip,
+ __be32 newip,
  u_int16_t port,
  unsigned int matchoff,
  unsigned int matchlen,
@@ -57,7 +57,7 @@ mangle_rfc959_packet(struct sk_buff **pskb,
 /* |1|132.235.1.2|6275| */
 static int
 mangle_eprt_packet(struct sk_buff **pskb,
- u_int32_t newip,
+ __be32 newip,
  u_int16_t port,
  unsigned int matchoff,
  unsigned int matchlen,
@@ -79,7 +79,7 @@ mangle_eprt_packet(struct sk_buff **pskb,
 /* |1|132.235.1.2|6275| */
 static int
 mangle_epsv_packet(struct sk_buff **pskb,
- u_int32_t newip,
+ __be32 newip,
  u_int16_t port,
  unsigned int matchoff,
  unsigned int matchlen,
@@ -98,7 +98,7 @@ mangle_epsv_packet(struct sk_buff **pskb,
  matchlen, buffer, strlen(buffer));
 }
 
-static int (*mangle[])(struct sk_buff **, u_int32_t, u_int16_t,
+static int (*mangle[])(struct sk_buff **, __be32, u_int16_t,
  unsigned int,
  unsigned int,
  struct ip_conntrack *,
@@ -120,7 +120,7 @@ static unsigned int ip_nat_ftp(struct sk_buff **pskb,
  struct ip_conntrack_expect *exp,
  u32 *seq)
 {
- u_int32_t newip;
+ __be32 newip;
  u_int16_t port;
  int dir = CTINFO2DIR(ctinfo);
  struct ip_conntrack *ct = exp->master;
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index 7f6a75984f6c..3bf858480558 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -189,7 +189,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
  datalen, 0));
  } else
  tcph->check = nf_proto_csum_update(*pskb,
- htons(oldlen) ^ 0xFFFF,
+ htons(oldlen) ^ htons(0xFFFF),
  htons(datalen),
  tcph->check, 1);
 
@@ -267,7 +267,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
  udph->check = -1;
  } else
  udph->check = nf_proto_csum_update(*pskb,
- htons(oldlen) ^ 0xFFFF,
+ htons(oldlen) ^ htons(0xFFFF),
  htons(datalen),
  udph->check, 1);
  return 1;
@@ -283,27 +283,25 @@ sack_adjust(struct sk_buff *skb,
  struct ip_nat_seq *natseq)
 {
  while (sackoff < sackend) {
- struct tcp_sack_block *sack;
- u_int32_t new_start_seq, new_end_seq;
+ struct tcp_sack_block_wire *sack;
+ __be32 new_start_seq, new_end_seq;
 
  sack = (void *)skb->data + sackoff;
  if (after(ntohl(sack->start_seq) - natseq->offset_before,
  natseq->correction_pos))
- new_start_seq = ntohl(sack->start_seq)
- - natseq->offset_after;
+ new_start_seq = htonl(ntohl(sack->start_seq)
+ - natseq->offset_after);
  else
- new_start_seq = ntohl(sack->start_seq)
- - natseq->offset_before;
- new_start_seq = htonl(new_start_seq);
+ new_start_seq = htonl(ntohl(sack->start_seq)
+ - natseq->offset_before);
 
  if (after(ntohl(sack->end_seq) - natseq->offset_before,
  natseq->correction_pos))
- new_end_seq = ntohl(sack->end_seq)
- - natseq->offset_after;
+ new_end_seq = htonl(ntohl(sack->end_seq)
+ - natseq->offset_after);
  else
- new_end_seq = ntohl(sack->end_seq)
- - natseq->offset_before;
- new_end_seq = htonl(new_end_seq);
+ new_end_seq = htonl(ntohl(sack->end_seq)
+ - natseq->offset_before);
 
  DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
  ntohl(sack->start_seq), new_start_seq,
@@ -375,7 +373,8 @@ ip_nat_seq_adjust(struct sk_buff **pskb,
  enum ip_conntrack_info ctinfo)
 {
  struct tcphdr *tcph;
- int dir, newseq, newack;
+ int dir;
+ __be32 newseq, newack;
  struct ip_nat_seq *this_way, *other_way;
 
  dir = CTINFO2DIR(ctinfo);
@@ -388,17 +387,15 @@ ip_nat_seq_adjust(struct sk_buff **pskb,
 
  tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
  if (after(ntohl(tcph->seq), this_way->correction_pos))
- newseq = ntohl(tcph->seq) + this_way->offset_after;
+ newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
  else
- newseq = ntohl(tcph->seq) + this_way->offset_before;
- newseq = htonl(newseq);
+ newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
 
  if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
  other_way->correction_pos))
- newack = ntohl(tcph->ack_seq) - other_way->offset_after;
+ newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
  else
- newack = ntohl(tcph->ack_seq) - other_way->offset_before;
- newack = htonl(newack);
+ newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
 
  tcph->check = nf_proto_csum_update(*pskb, ~tcph->seq, newseq,
  tcph->check, 0);
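
The rewritten sack_adjust() and ip_nat_seq_adjust() hunks fold the trailing htonl() into the assignment so the sequence variables can be typed __be32 end to end: convert to host order, apply the NAT offset, convert back, all in one expression. A sketch of that shape with plain integer types; the kernel's after() macro, which copes with 32-bit sequence-number wraparound, is replaced by a plain comparison here to keep the sketch short:

#include <stdint.h>
#include <arpa/inet.h>

/* wire_seq is big-endian, straight from the TCP header; the offsets are
 * host-order byte counts kept by the NAT helper's bookkeeping. */
static uint32_t nat_adjust_seq(uint32_t wire_seq, uint32_t correction_pos,
			       int32_t offset_before, int32_t offset_after)
{
	uint32_t seq = ntohl(wire_seq);
	int32_t off = (seq > correction_pos) ? offset_after : offset_before;

	return htonl(seq + off);	/* back to network order in the same step */
}
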
diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c
index 419b878fb467..4a7d34466ee2 100644
--- a/net/ipv4/netfilter/ip_nat_helper_h323.c
+++ b/net/ipv4/netfilter/ip_nat_helper_h323.c
@@ -32,13 +32,13 @@
 /****************************************************************************/
 static int set_addr(struct sk_buff **pskb,
  unsigned char **data, int dataoff,
- unsigned int addroff, u_int32_t ip, u_int16_t port)
+ unsigned int addroff, __be32 ip, u_int16_t port)
 {
  enum ip_conntrack_info ctinfo;
  struct ip_conntrack *ct = ip_conntrack_get(*pskb, &ctinfo);
  struct {
- u_int32_t ip;
- u_int16_t port;
+ __be32 ip;
+ __be16 port;
  } __attribute__ ((__packed__)) buf;
  struct tcphdr _tcph, *th;
 
@@ -86,7 +86,7 @@ static int set_addr(struct sk_buff **pskb,
 static int set_h225_addr(struct sk_buff **pskb,
  unsigned char **data, int dataoff,
  TransportAddress * addr,
- u_int32_t ip, u_int16_t port)
+ __be32 ip, u_int16_t port)
 {
  return set_addr(pskb, data, dataoff, addr->ipAddress.ip, ip, port);
 }
@@ -95,7 +95,7 @@ static int set_h225_addr(struct sk_buff **pskb,
 static int set_h245_addr(struct sk_buff **pskb,
  unsigned char **data, int dataoff,
  H245_TransportAddress * addr,
- u_int32_t ip, u_int16_t port)
+ __be32 ip, u_int16_t port)
 {
  return set_addr(pskb, data, dataoff,
  addr->unicastAddress.iPAddress.network, ip, port);
@@ -110,7 +110,7 @@ static int set_sig_addr(struct sk_buff **pskb, struct ip_conntrack *ct,
  struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
  int dir = CTINFO2DIR(ctinfo);
  int i;
- u_int32_t ip;
+ __be32 ip;
  u_int16_t port;
 
  for (i = 0; i < count; i++) {
@@ -164,7 +164,7 @@ static int set_ras_addr(struct sk_buff **pskb, struct ip_conntrack *ct,
 {
  int dir = CTINFO2DIR(ctinfo);
  int i;
- u_int32_t ip;
+ __be32 ip;
  u_int16_t port;
 
  for (i = 0; i < count; i++) {
@@ -433,7 +433,7 @@ static int nat_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
  struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
  int dir = CTINFO2DIR(ctinfo);
  u_int16_t nated_port = port;
- u_int32_t ip;
+ __be32 ip;
 
  /* Set expectations for NAT */
  exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
index 2ff578807123..329fdcd7d702 100644
--- a/net/ipv4/netfilter/ip_nat_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -51,7 +51,7 @@
 
 #define IP_NAT_PPTP_VERSION "3.0"
 
-#define REQ_CID(req, off) (*(u_int16_t *)((char *)(req) + (off)))
+#define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off)))
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
index ec50cc295317..3f6efc13ac74 100644
--- a/net/ipv4/netfilter/ip_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c
@@ -67,7 +67,7 @@ icmp_manip_pkt(struct sk_buff **pskb,
 
  hdr = (struct icmphdr *)((*pskb)->data + hdroff);
  hdr->checksum = nf_proto_csum_update(*pskb,
- hdr->un.echo.id ^ 0xFFFF,
+ hdr->un.echo.id ^ htons(0xFFFF),
  tuple->src.u.icmp.id,
  hdr->checksum, 0);
  hdr->un.echo.id = tuple->src.u.icmp.id;
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index 72a6307bd2db..12deb13b93b1 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -24,7 +24,7 @@ tcp_in_range(const struct ip_conntrack_tuple *tuple,
  const union ip_conntrack_manip_proto *min,
  const union ip_conntrack_manip_proto *max)
 {
- u_int16_t port;
+ __be16 port;
 
  if (maniptype == IP_NAT_MANIP_SRC)
  port = tuple->src.u.tcp.port;
@@ -42,7 +42,7 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
  const struct ip_conntrack *conntrack)
 {
  static u_int16_t port;
- u_int16_t *portptr;
+ __be16 *portptr;
  unsigned int range_size, min, i;
 
  if (maniptype == IP_NAT_MANIP_SRC)
@@ -93,8 +93,8 @@ tcp_manip_pkt(struct sk_buff **pskb,
  struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
  struct tcphdr *hdr;
  unsigned int hdroff = iphdroff + iph->ihl*4;
- u32 oldip, newip;
- u16 *portptr, newport, oldport;
+ __be32 oldip, newip;
+ __be16 *portptr, newport, oldport;
  int hdrsize = 8; /* TCP connection tracking guarantees this much */
 
  /* this could be a inner header returned in icmp packet; in such
@@ -130,7 +130,7 @@ tcp_manip_pkt(struct sk_buff **pskb,
  return 1;
 
  hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip, hdr->check, 1);
- hdr->check = nf_proto_csum_update(*pskb, oldport ^ 0xFFFF, newport,
+ hdr->check = nf_proto_csum_update(*pskb, oldport ^ htons(0xFFFF), newport,
  hdr->check, 0);
  return 1;
 }
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 5da196ae758c..4bbec7730d18 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -24,7 +24,7 @@ udp_in_range(const struct ip_conntrack_tuple *tuple,
  const union ip_conntrack_manip_proto *min,
  const union ip_conntrack_manip_proto *max)
 {
- u_int16_t port;
+ __be16 port;
 
  if (maniptype == IP_NAT_MANIP_SRC)
  port = tuple->src.u.udp.port;
@@ -42,7 +42,7 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple,
  const struct ip_conntrack *conntrack)
 {
  static u_int16_t port;
- u_int16_t *portptr;
+ __be16 *portptr;
  unsigned int range_size, min, i;
 
  if (maniptype == IP_NAT_MANIP_SRC)
@@ -91,8 +91,8 @@ udp_manip_pkt(struct sk_buff **pskb,
  struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
  struct udphdr *hdr;
  unsigned int hdroff = iphdroff + iph->ihl*4;
- u32 oldip, newip;
- u16 *portptr, newport;
+ __be32 oldip, newip;
+ __be16 *portptr, newport;
 
  if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
  return 0;
@@ -118,7 +118,7 @@ udp_manip_pkt(struct sk_buff **pskb,
  hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip,
  hdr->check, 1);
  hdr->check = nf_proto_csum_update(*pskb,
- *portptr ^ 0xFFFF, newport,
+ *portptr ^ htons(0xFFFF), newport,
  hdr->check, 0);
  if (!hdr->check)
  hdr->check = -1;
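
The htons(0xFFFF) XOR in these NAT hunks passes the one's complement of the old 16-bit field to the checksum-update helper; writing the constant as htons(0xFFFF) keeps both operands of the XOR in network byte order, which is what the __be16 annotations require (numerically 0xFFFF is unchanged by the swap). The underlying arithmetic is the standard RFC 1624 incremental update, sketched here as a standalone function rather than the kernel's nf_proto_csum_update():

#include <stdint.h>

/* Incrementally fix a 16-bit Internet checksum after one aligned 16-bit
 * word of the covered data changes from old_w to new_w (RFC 1624, eqn 3:
 * HC' = ~(~HC + ~m + m')).  All values stay in network byte order;
 * one's-complement addition tolerates either byte order as long as the
 * carries are folded back in. */
static uint16_t csum_update16(uint16_t check, uint16_t old_w, uint16_t new_w)
{
	uint32_t sum = (uint16_t)~check;
	sum += (uint16_t)~old_w;
	sum += new_w;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries; the first */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold can itself produce one */
	return (uint16_t)~sum;
}
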
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index 7b703839aa58..a176aa3031e0 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -119,7 +119,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
 }
 
 /* Before 2.6.11 we did implicit source NAT if required. Warn about change. */
-static void warn_if_extra_mangle(u32 dstip, u32 srcip)
+static void warn_if_extra_mangle(__be32 dstip, __be32 srcip)
 {
  static int warned = 0;
  struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } };
@@ -205,7 +205,7 @@ alloc_null_binding(struct ip_conntrack *conntrack,
  per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
  Use reply in case it's already been mangled (eg local packet).
  */
- u_int32_t ip
+ __be32 ip
  = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
  ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip
  : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
@@ -222,7 +222,7 @@ alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
  struct ip_nat_info *info,
  unsigned int hooknum)
 {
- u_int32_t ip
+ __be32 ip
  = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
  ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip
  : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c
index 6ffba63adca2..71fc2730a007 100644
--- a/net/ipv4/netfilter/ip_nat_sip.c
+++ b/net/ipv4/netfilter/ip_nat_sip.c
@@ -60,8 +60,8 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb,
  enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
  char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
  unsigned int bufflen, dataoff;
- u_int32_t ip;
- u_int16_t port;
+ __be32 ip;
+ __be16 port;
 
  dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
 
@@ -159,7 +159,7 @@ static int mangle_content_len(struct sk_buff **pskb,
 static unsigned int mangle_sdp(struct sk_buff **pskb,
  enum ip_conntrack_info ctinfo,
  struct ip_conntrack *ct,
- u_int32_t newip, u_int16_t port,
+ __be32 newip, u_int16_t port,
  const char *dptr)
 {
  char buffer[sizeof("nnn.nnn.nnn.nnn")];
@@ -195,7 +195,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb,
 {
  struct ip_conntrack *ct = exp->master;
  enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- u_int32_t newip;
+ __be32 newip;
  u_int16_t port;
 
  DEBUGP("ip_nat_sdp():\n");
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 18b7fbdccb61..168f45fa1898 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1211,7 +1211,7 @@ static int snmp_translate(struct ip_conntrack *ct,
  struct sk_buff **pskb)
 {
  struct iphdr *iph = (*pskb)->nh.iph;
- struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
+ struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
  u_int16_t udplen = ntohs(udph->len);
  u_int16_t paylen = udplen - sizeof(struct udphdr);
  int dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 9c577db62047..d85d2de50449 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -191,7 +191,7 @@ ip_nat_in(unsigned int hooknum,
  int (*okfn)(struct sk_buff *))
 {
  unsigned int ret;
- u_int32_t daddr = (*pskb)->nh.iph->daddr;
+ __be32 daddr = (*pskb)->nh.iph->daddr;
 
  ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
  if (ret != NF_DROP && ret != NF_STOLEN
@@ -265,7 +265,8 @@ ip_nat_local_fn(unsigned int hooknum,
  ct->tuplehash[!dir].tuple.src.u.all
 #endif
  )
- return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
+ if (ip_route_me_harder(pskb, RTN_UNSPEC))
+ ret = NF_DROP;
  }
  return ret;
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 78a44b01c035..4b90927619b8 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1932,6 +1932,9 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 {
  int ret;
 
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
  switch (cmd) {
  case IPT_SO_GET_INFO:
  ret = get_info(user, len, 1);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 41589665fc5d..7a29d6e7baa7 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -52,7 +52,7 @@ struct clusterip_config {
  atomic_t entries; /* number of entries/rules
  * referencing us */
 
- u_int32_t clusterip; /* the IP address */
+ __be32 clusterip; /* the IP address */
  u_int8_t clustermac[ETH_ALEN]; /* the MAC address */
  struct net_device *dev; /* device */
  u_int16_t num_total_nodes; /* total number of nodes */
@@ -119,7 +119,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 }
 
 static struct clusterip_config *
-__clusterip_config_find(u_int32_t clusterip)
+__clusterip_config_find(__be32 clusterip)
 {
  struct list_head *pos;
 
@@ -136,7 +136,7 @@ __clusterip_config_find(u_int32_t clusterip)
 }
 
 static inline struct clusterip_config *
-clusterip_config_find_get(u_int32_t clusterip, int entry)
+clusterip_config_find_get(__be32 clusterip, int entry)
 {
  struct clusterip_config *c;
 
@@ -166,7 +166,7 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 }
 
 static struct clusterip_config *
-clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
+clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip,
  struct net_device *dev)
 {
  struct clusterip_config *c;
@@ -387,7 +387,7 @@ checkentry(const char *tablename,
  return 0;
 
  }
- if (e->ip.dmsk.s_addr != 0xffffffff
+ if (e->ip.dmsk.s_addr != htonl(0xffffffff)
  || e->ip.dst.s_addr == 0) {
  printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n");
  return 0;
@@ -476,9 +476,9 @@ static struct ipt_target clusterip_tgt = {
 /* hardcoded for 48bit ethernet and 32bit ipv4 addresses */
 struct arp_payload {
  u_int8_t src_hw[ETH_ALEN];
- u_int32_t src_ip;
+ __be32 src_ip;
  u_int8_t dst_hw[ETH_ALEN];
- u_int32_t dst_ip;
+ __be32 dst_ip;
 } __attribute__ ((packed));
 
 #ifdef CLUSTERIP_DEBUG
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 23f9c7ebe7eb..1aa4517fbcdb 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -37,8 +37,8 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
  oldtos = iph->tos;
  iph->tos &= ~IPT_ECN_IP_MASK;
  iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
- iph->check = nf_csum_update(oldtos ^ 0xFFFF, iph->tos,
- iph->check);
+ iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
+ htons(iph->tos), iph->check);
  }
  return 1;
 }
@@ -48,7 +48,7 @@ static inline int
 set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
 {
  struct tcphdr _tcph, *tcph;
- u_int16_t oldval;
+ __be16 oldval;
 
  /* Not enought header? */
  tcph = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
@@ -66,15 +66,15 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
  return 0;
  tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
 
- oldval = ((u_int16_t *)tcph)[6];
+ oldval = ((__be16 *)tcph)[6];
  if (einfo->operation & IPT_ECN_OP_SET_ECE)
  tcph->ece = einfo->proto.tcp.ece;
  if (einfo->operation & IPT_ECN_OP_SET_CWR)
  tcph->cwr = einfo->proto.tcp.cwr;
 
  tcph->check = nf_proto_csum_update((*pskb),
- oldval ^ 0xFFFF,
- ((u_int16_t *)tcph)[6],
+ oldval ^ htons(0xFFFF),
+ ((__be16 *)tcph)[6],
  tcph->check, 0);
  return 1;
 }
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index bc65168a3437..3dbfcfac8a84 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -70,7 +70,7 @@ masquerade_target(struct sk_buff **pskb,
70 const struct ip_nat_multi_range_compat *mr; 70 const struct ip_nat_multi_range_compat *mr;
71 struct ip_nat_range newrange; 71 struct ip_nat_range newrange;
72 struct rtable *rt; 72 struct rtable *rt;
73 u_int32_t newsrc; 73 __be32 newsrc;
74 74
75 IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING); 75 IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
76 76
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index beb2914225ff..58a88f227108 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -58,7 +58,7 @@ target(struct sk_buff **pskb,
58{ 58{
59 struct ip_conntrack *ct; 59 struct ip_conntrack *ct;
60 enum ip_conntrack_info ctinfo; 60 enum ip_conntrack_info ctinfo;
61 u_int32_t new_ip, netmask; 61 __be32 new_ip, netmask;
62 const struct ip_nat_multi_range_compat *mr = targinfo; 62 const struct ip_nat_multi_range_compat *mr = targinfo;
63 struct ip_nat_range newrange; 63 struct ip_nat_range newrange;
64 64
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index f03d43671c6d..c0dcfe9d610c 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -61,7 +61,7 @@ redirect_target(struct sk_buff **pskb,
61{ 61{
62 struct ip_conntrack *ct; 62 struct ip_conntrack *ct;
63 enum ip_conntrack_info ctinfo; 63 enum ip_conntrack_info ctinfo;
64 u_int32_t newdst; 64 __be32 newdst;
65 const struct ip_nat_multi_range_compat *mr = targinfo; 65 const struct ip_nat_multi_range_compat *mr = targinfo;
66 struct ip_nat_range newrange; 66 struct ip_nat_range newrange;
67 67
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b81821edd893..ad0312d0e4fd 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -38,76 +38,16 @@ MODULE_DESCRIPTION("iptables REJECT target module");
38#define DEBUGP(format, args...) 38#define DEBUGP(format, args...)
39#endif 39#endif
40 40
41static inline struct rtable *route_reverse(struct sk_buff *skb,
42 struct tcphdr *tcph, int hook)
43{
44 struct iphdr *iph = skb->nh.iph;
45 struct dst_entry *odst;
46 struct flowi fl = {};
47 struct rtable *rt;
48
49 /* We don't require ip forwarding to be enabled to be able to
50 * send a RST reply for bridged traffic. */
51 if (hook != NF_IP_FORWARD
52#ifdef CONFIG_BRIDGE_NETFILTER
53 || (skb->nf_bridge && skb->nf_bridge->mask & BRNF_BRIDGED)
54#endif
55 ) {
56 fl.nl_u.ip4_u.daddr = iph->saddr;
57 if (hook == NF_IP_LOCAL_IN)
58 fl.nl_u.ip4_u.saddr = iph->daddr;
59 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
60
61 if (ip_route_output_key(&rt, &fl) != 0)
62 return NULL;
63 } else {
64 /* non-local src, find valid iif to satisfy
65 * rp-filter when calling ip_route_input. */
66 fl.nl_u.ip4_u.daddr = iph->daddr;
67 if (ip_route_output_key(&rt, &fl) != 0)
68 return NULL;
69
70 odst = skb->dst;
71 if (ip_route_input(skb, iph->saddr, iph->daddr,
72 RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
73 dst_release(&rt->u.dst);
74 return NULL;
75 }
76 dst_release(&rt->u.dst);
77 rt = (struct rtable *)skb->dst;
78 skb->dst = odst;
79
80 fl.nl_u.ip4_u.daddr = iph->saddr;
81 fl.nl_u.ip4_u.saddr = iph->daddr;
82 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
83 }
84
85 if (rt->u.dst.error) {
86 dst_release(&rt->u.dst);
87 return NULL;
88 }
89
90 fl.proto = IPPROTO_TCP;
91 fl.fl_ip_sport = tcph->dest;
92 fl.fl_ip_dport = tcph->source;
93 security_skb_classify_flow(skb, &fl);
94
95 xfrm_lookup((struct dst_entry **)&rt, &fl, NULL, 0);
96
97 return rt;
98}
99
100/* Send RST reply */ 41/* Send RST reply */
101static void send_reset(struct sk_buff *oldskb, int hook) 42static void send_reset(struct sk_buff *oldskb, int hook)
102{ 43{
103 struct sk_buff *nskb; 44 struct sk_buff *nskb;
104 struct iphdr *iph = oldskb->nh.iph; 45 struct iphdr *iph = oldskb->nh.iph;
105 struct tcphdr _otcph, *oth, *tcph; 46 struct tcphdr _otcph, *oth, *tcph;
106 struct rtable *rt; 47 __be16 tmp_port;
107 u_int16_t tmp_port; 48 __be32 tmp_addr;
108 u_int32_t tmp_addr;
109 int needs_ack; 49 int needs_ack;
110 int hh_len; 50 unsigned int addr_type;
111 51
112 /* IP header checks: fragment. */ 52 /* IP header checks: fragment. */
113 if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)) 53 if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
@@ -126,23 +66,13 @@ static void send_reset(struct sk_buff *oldskb, int hook)
126 if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP)) 66 if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP))
127 return; 67 return;
128 68
129 if ((rt = route_reverse(oldskb, oth, hook)) == NULL)
130 return;
131
132 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
133
134 /* We need a linear, writeable skb. We also need to expand 69 /* We need a linear, writeable skb. We also need to expand
135 headroom in case hh_len of incoming interface < hh_len of 70 headroom in case hh_len of incoming interface < hh_len of
136 outgoing interface */ 71 outgoing interface */
137 nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb), 72 nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
138 GFP_ATOMIC); 73 GFP_ATOMIC);
139 if (!nskb) { 74 if (!nskb)
140 dst_release(&rt->u.dst);
141 return; 75 return;
142 }
143
144 dst_release(nskb->dst);
145 nskb->dst = &rt->u.dst;
146 76
147 /* This packet will not be the same as the other: clear nf fields */ 77 /* This packet will not be the same as the other: clear nf fields */
148 nf_reset(nskb); 78 nf_reset(nskb);
@@ -184,6 +114,21 @@ static void send_reset(struct sk_buff *oldskb, int hook)
184 tcph->window = 0; 114 tcph->window = 0;
185 tcph->urg_ptr = 0; 115 tcph->urg_ptr = 0;
186 116
117 /* Set DF, id = 0 */
118 nskb->nh.iph->frag_off = htons(IP_DF);
119 nskb->nh.iph->id = 0;
120
121 addr_type = RTN_UNSPEC;
122 if (hook != NF_IP_FORWARD
123#ifdef CONFIG_BRIDGE_NETFILTER
124 || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
125#endif
126 )
127 addr_type = RTN_LOCAL;
128
129 if (ip_route_me_harder(&nskb, addr_type))
130 goto free_nskb;
131
187 /* Adjust TCP checksum */ 132 /* Adjust TCP checksum */
188 nskb->ip_summed = CHECKSUM_NONE; 133 nskb->ip_summed = CHECKSUM_NONE;
189 tcph->check = 0; 134 tcph->check = 0;
@@ -192,12 +137,8 @@ static void send_reset(struct sk_buff *oldskb, int hook)
192 nskb->nh.iph->daddr, 137 nskb->nh.iph->daddr,
193 csum_partial((char *)tcph, 138 csum_partial((char *)tcph,
194 sizeof(struct tcphdr), 0)); 139 sizeof(struct tcphdr), 0));
195 140 /* Adjust IP TTL */
196 /* Adjust IP TTL, DF */
197 nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); 141 nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
198 /* Set DF, id = 0 */
199 nskb->nh.iph->frag_off = htons(IP_DF);
200 nskb->nh.iph->id = 0;
201 142
202 /* Adjust IP checksum */ 143 /* Adjust IP checksum */
203 nskb->nh.iph->check = 0; 144 nskb->nh.iph->check = 0;
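
The REJECT rewrite above drops the open-coded route_reverse(): the RST is now built first (with LL_MAX_HEADER of headroom, DF set and id zeroed up front) and then routed by ip_route_me_harder(), which gets an address-type hint — RTN_LOCAL whenever the reset answers traffic aimed at this host or bridged traffic (hook != NF_IP_FORWARD, or BRNF_BRIDGED), RTN_UNSPEC on the plain forward path. A stand-alone sketch of just that hint selection, with simplified stand-in types rather than the netfilter interfaces:

/* Stand-alone sketch of the address-type hint chosen before
 * ip_route_me_harder() in the rewritten REJECT target.  The enums and the
 * "hook"/"bridged" inputs are simplified stand-ins for kernel state. */
#include <stdbool.h>
#include <stdio.h>

enum hook { HOOK_PRE_ROUTING, HOOK_LOCAL_IN, HOOK_FORWARD, HOOK_LOCAL_OUT };
enum addr_type { ADDR_UNSPEC, ADDR_LOCAL };

/* On everything except the pure forward path (and always for bridged
 * frames), the RST answers traffic aimed at this box, so the reverse route
 * may use a local source address. */
static enum addr_type reset_addr_type(enum hook hook, bool bridged)
{
	if (hook != HOOK_FORWARD || bridged)
		return ADDR_LOCAL;
	return ADDR_UNSPEC;
}

int main(void)
{
	printf("local-in : %d\n", reset_addr_type(HOOK_LOCAL_IN, false)); /* 1 */
	printf("forward  : %d\n", reset_addr_type(HOOK_FORWARD, false));  /* 0 */
	printf("bridged  : %d\n", reset_addr_type(HOOK_FORWARD, true));   /* 1 */
	return 0;
}
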
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index efbcb1198832..b38b13328d73 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -135,7 +135,8 @@ same_target(struct sk_buff **pskb,
135{ 135{
136 struct ip_conntrack *ct; 136 struct ip_conntrack *ct;
137 enum ip_conntrack_info ctinfo; 137 enum ip_conntrack_info ctinfo;
138 u_int32_t tmpip, aindex, new_ip; 138 u_int32_t tmpip, aindex;
139 __be32 new_ip;
139 const struct ipt_same_info *same = targinfo; 140 const struct ipt_same_info *same = targinfo;
140 struct ip_nat_range newrange; 141 struct ip_nat_range newrange;
141 const struct ip_conntrack_tuple *t; 142 const struct ip_conntrack_tuple *t;
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 4246c4321e5b..108b6b76311f 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -42,7 +42,8 @@ ipt_tcpmss_target(struct sk_buff **pskb,
42 const struct ipt_tcpmss_info *tcpmssinfo = targinfo; 42 const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
43 struct tcphdr *tcph; 43 struct tcphdr *tcph;
44 struct iphdr *iph; 44 struct iphdr *iph;
45 u_int16_t tcplen, newtotlen, oldval, newmss; 45 u_int16_t tcplen, newmss;
46 __be16 newtotlen, oldval;
46 unsigned int i; 47 unsigned int i;
47 u_int8_t *opt; 48 u_int8_t *opt;
48 49
@@ -97,7 +98,7 @@ ipt_tcpmss_target(struct sk_buff **pskb,
97 opt[i+3] = (newmss & 0x00ff); 98 opt[i+3] = (newmss & 0x00ff);
98 99
99 tcph->check = nf_proto_csum_update(*pskb, 100 tcph->check = nf_proto_csum_update(*pskb,
100 htons(oldmss)^0xFFFF, 101 htons(oldmss)^htons(0xFFFF),
101 htons(newmss), 102 htons(newmss),
102 tcph->check, 0); 103 tcph->check, 0);
103 return IPT_CONTINUE; 104 return IPT_CONTINUE;
@@ -126,7 +127,7 @@ ipt_tcpmss_target(struct sk_buff **pskb,
126 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); 127 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
127 128
128 tcph->check = nf_proto_csum_update(*pskb, 129 tcph->check = nf_proto_csum_update(*pskb,
129 htons(tcplen) ^ 0xFFFF, 130 htons(tcplen) ^ htons(0xFFFF),
130 htons(tcplen + TCPOLEN_MSS), 131 htons(tcplen + TCPOLEN_MSS),
131 tcph->check, 1); 132 tcph->check, 1);
132 opt[0] = TCPOPT_MSS; 133 opt[0] = TCPOPT_MSS;
@@ -134,18 +135,18 @@ ipt_tcpmss_target(struct sk_buff **pskb,
134 opt[2] = (newmss & 0xff00) >> 8; 135 opt[2] = (newmss & 0xff00) >> 8;
135 opt[3] = (newmss & 0x00ff); 136 opt[3] = (newmss & 0x00ff);
136 137
137 tcph->check = nf_proto_csum_update(*pskb, ~0, *((u_int32_t *)opt), 138 tcph->check = nf_proto_csum_update(*pskb, htonl(~0), *((__be32 *)opt),
138 tcph->check, 0); 139 tcph->check, 0);
139 140
140 oldval = ((u_int16_t *)tcph)[6]; 141 oldval = ((__be16 *)tcph)[6];
141 tcph->doff += TCPOLEN_MSS/4; 142 tcph->doff += TCPOLEN_MSS/4;
142 tcph->check = nf_proto_csum_update(*pskb, 143 tcph->check = nf_proto_csum_update(*pskb,
143 oldval ^ 0xFFFF, 144 oldval ^ htons(0xFFFF),
144 ((u_int16_t *)tcph)[6], 145 ((__be16 *)tcph)[6],
145 tcph->check, 0); 146 tcph->check, 0);
146 147
147 newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS); 148 newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
148 iph->check = nf_csum_update(iph->tot_len ^ 0xFFFF, 149 iph->check = nf_csum_update(iph->tot_len ^ htons(0xFFFF),
149 newtotlen, iph->check); 150 newtotlen, iph->check);
150 iph->tot_len = newtotlen; 151 iph->tot_len = newtotlen;
151 return IPT_CONTINUE; 152 return IPT_CONTINUE;
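
TCPMSS keeps the MSS value itself in host order because it is written into the option a byte at a time (opt[2]/opt[3] hold the high and low byte), while every checksum operand is now passed through htons() first — including the folded old value, htons(oldmss) ^ htons(0xFFFF). A small sketch of the big-endian option read/write (helper names are illustrative only):

/* Reading and rewriting the 16-bit MSS stored big-endian inside a TCP MSS
 * option (kind=2, len=4), mirroring the opt[2]/opt[3] handling in the diff. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define TCPOPT_MSS   2
#define TCPOLEN_MSS  4

static uint16_t mss_option_get(const uint8_t *opt)
{
	return (uint16_t)(opt[2] << 8 | opt[3]);	/* big-endian on the wire */
}

static void mss_option_set(uint8_t *opt, uint16_t mss)
{
	opt[0] = TCPOPT_MSS;
	opt[1] = TCPOLEN_MSS;
	opt[2] = (mss & 0xff00) >> 8;
	opt[3] = (mss & 0x00ff);
}

int main(void)
{
	uint8_t opt[TCPOLEN_MSS];

	mss_option_set(opt, 1460);
	uint16_t oldmss = mss_option_get(opt);

	/* Clamp to a smaller MSS, as the target does, and show the operands a
	 * checksum update would see: htons(old) ^ htons(0xFFFF) and htons(new). */
	uint16_t newmss = 1400;
	mss_option_set(opt, newmss);
	printf("old %u -> new %u (csum operands %04x, %04x)\n",
	       (unsigned)oldmss, (unsigned)newmss,
	       (unsigned)(htons(oldmss) ^ htons(0xFFFF)), (unsigned)htons(newmss));
	return 0;
}
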
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 471a4c438b0a..83b80b3a5d2f 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -38,8 +38,8 @@ target(struct sk_buff **pskb,
38 iph = (*pskb)->nh.iph; 38 iph = (*pskb)->nh.iph;
39 oldtos = iph->tos; 39 oldtos = iph->tos;
40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; 40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
41 iph->check = nf_csum_update(oldtos ^ 0xFFFF, iph->tos, 41 iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
42 iph->check); 42 htons(iph->tos), iph->check);
43 } 43 }
44 return IPT_CONTINUE; 44 return IPT_CONTINUE;
45} 45}
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index 96e79cc6d0f2..ac9517d62af0 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -54,8 +54,8 @@ ipt_ttl_target(struct sk_buff **pskb,
54 } 54 }
55 55
56 if (new_ttl != iph->ttl) { 56 if (new_ttl != iph->ttl) {
57 iph->check = nf_csum_update(ntohs((iph->ttl << 8)) ^ 0xFFFF, 57 iph->check = nf_csum_update(htons((iph->ttl << 8)) ^ htons(0xFFFF),
58 ntohs(new_ttl << 8), 58 htons(new_ttl << 8),
59 iph->check); 59 iph->check);
60 iph->ttl = new_ttl; 60 iph->ttl = new_ttl;
61 } 61 }
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 893dae210b04..7b60eb74788b 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -22,7 +22,7 @@ MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
23MODULE_DESCRIPTION("iptables addrtype match"); 23MODULE_DESCRIPTION("iptables addrtype match");
24 24
25static inline int match_type(u_int32_t addr, u_int16_t mask) 25static inline int match_type(__be32 addr, u_int16_t mask)
26{ 26{
27 return !!(mask & (1 << inet_addr_type(addr))); 27 return !!(mask & (1 << inet_addr_type(addr)));
28} 28}
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 4f73a61aa3dd..33ccdbf8e794 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -50,11 +50,11 @@ static struct file_operations dl_file_ops;
50/* hash table crap */ 50/* hash table crap */
51 51
52struct dsthash_dst { 52struct dsthash_dst {
53 u_int32_t src_ip; 53 __be32 src_ip;
54 u_int32_t dst_ip; 54 __be32 dst_ip;
55 /* ports have to be consecutive !!! */ 55 /* ports have to be consecutive !!! */
56 u_int16_t src_port; 56 __be16 src_port;
57 u_int16_t dst_port; 57 __be16 dst_port;
58}; 58};
59 59
60struct dsthash_ent { 60struct dsthash_ent {
@@ -106,8 +106,10 @@ static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
106static inline u_int32_t 106static inline u_int32_t
107hash_dst(const struct ipt_hashlimit_htable *ht, const struct dsthash_dst *dst) 107hash_dst(const struct ipt_hashlimit_htable *ht, const struct dsthash_dst *dst)
108{ 108{
109 return (jhash_3words(dst->dst_ip, (dst->dst_port<<16 | dst->src_port), 109 return (jhash_3words((__force u32)dst->dst_ip,
110 dst->src_ip, ht->rnd) % ht->cfg.size); 110 ((__force u32)dst->dst_port<<16 |
111 (__force u32)dst->src_port),
112 (__force u32)dst->src_ip, ht->rnd) % ht->cfg.size);
111} 113}
112 114
113static inline struct dsthash_ent * 115static inline struct dsthash_ent *
@@ -406,7 +408,7 @@ hashlimit_match(const struct sk_buff *skb,
406 dst.src_ip = skb->nh.iph->saddr; 408 dst.src_ip = skb->nh.iph->saddr;
407 if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT 409 if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT
408 ||hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT) { 410 ||hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT) {
409 u_int16_t _ports[2], *ports; 411 __be16 _ports[2], *ports;
410 412
411 switch (skb->nh.iph->protocol) { 413 switch (skb->nh.iph->protocol) {
412 case IPPROTO_TCP: 414 case IPPROTO_TCP:
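
hash_dst() packs the two 16-bit ports into a single word (dst_port in the high half) before hashing, and with the fields now __be16/__be32 each operand is stripped of its annotation via (__force u32) so a byte-order-agnostic hash such as jhash_3words() can consume it. A sketch of the same packing, with a placeholder mixer standing in for jhash:

/* Packing two 16-bit ports into one 32-bit hash input word, as hash_dst()
 * does before calling jhash_3words().  A trivial mixer stands in for jhash. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint32_t mix(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
	/* Placeholder mixer; the kernel uses jhash_3words() here. */
	a ^= b * 2654435761u;
	a ^= c * 40503u;
	return (a ^ initval) * 2654435761u;
}

static uint32_t hash_dst(uint32_t src_ip, uint32_t dst_ip,
			 uint16_t src_port, uint16_t dst_port,
			 uint32_t rnd, uint32_t size)
{
	/* Both ports in one word: high half = dst_port, low half = src_port. */
	uint32_t ports = ((uint32_t)dst_port << 16) | src_port;

	return mix(dst_ip, ports, src_ip, rnd) % size;
}

int main(void)
{
	uint32_t src = htonl(0x0a000001), dst = htonl(0x0a000002);
	uint16_t sport = htons(40000), dport = htons(80);

	printf("bucket: %u\n",
	       (unsigned)hash_dst(src, dst, sport, dport, 0x12345678u, 1024));
	return 0;
}
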
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 32ae8d7ac506..126db44e71a8 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -50,11 +50,10 @@ MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/ipt_recent/* files");
50MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/ipt_recent/* files"); 50MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/ipt_recent/* files");
51MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/ipt_recent/* files"); 51MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/ipt_recent/* files");
52 52
53
54struct recent_entry { 53struct recent_entry {
55 struct list_head list; 54 struct list_head list;
56 struct list_head lru_list; 55 struct list_head lru_list;
57 u_int32_t addr; 56 __be32 addr;
58 u_int8_t ttl; 57 u_int8_t ttl;
59 u_int8_t index; 58 u_int8_t index;
60 u_int16_t nstamps; 59 u_int16_t nstamps;
@@ -85,17 +84,17 @@ static struct file_operations recent_fops;
85static u_int32_t hash_rnd; 84static u_int32_t hash_rnd;
86static int hash_rnd_initted; 85static int hash_rnd_initted;
87 86
88static unsigned int recent_entry_hash(u_int32_t addr) 87static unsigned int recent_entry_hash(__be32 addr)
89{ 88{
90 if (!hash_rnd_initted) { 89 if (!hash_rnd_initted) {
91 get_random_bytes(&hash_rnd, 4); 90 get_random_bytes(&hash_rnd, 4);
92 hash_rnd_initted = 1; 91 hash_rnd_initted = 1;
93 } 92 }
94 return jhash_1word(addr, hash_rnd) & (ip_list_hash_size - 1); 93 return jhash_1word((__force u32)addr, hash_rnd) & (ip_list_hash_size - 1);
95} 94}
96 95
97static struct recent_entry * 96static struct recent_entry *
98recent_entry_lookup(const struct recent_table *table, u_int32_t addr, u_int8_t ttl) 97recent_entry_lookup(const struct recent_table *table, __be32 addr, u_int8_t ttl)
99{ 98{
100 struct recent_entry *e; 99 struct recent_entry *e;
101 unsigned int h; 100 unsigned int h;
@@ -116,7 +115,7 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
116} 115}
117 116
118static struct recent_entry * 117static struct recent_entry *
119recent_entry_init(struct recent_table *t, u_int32_t addr, u_int8_t ttl) 118recent_entry_init(struct recent_table *t, __be32 addr, u_int8_t ttl)
120{ 119{
121 struct recent_entry *e; 120 struct recent_entry *e;
122 121
@@ -178,7 +177,7 @@ ipt_recent_match(const struct sk_buff *skb,
178 const struct ipt_recent_info *info = matchinfo; 177 const struct ipt_recent_info *info = matchinfo;
179 struct recent_table *t; 178 struct recent_table *t;
180 struct recent_entry *e; 179 struct recent_entry *e;
181 u_int32_t addr; 180 __be32 addr;
182 u_int8_t ttl; 181 u_int8_t ttl;
183 int ret = info->invert; 182 int ret = info->invert;
184 183
@@ -406,7 +405,7 @@ static ssize_t recent_proc_write(struct file *file, const char __user *input,
406 struct recent_table *t = pde->data; 405 struct recent_table *t = pde->data;
407 struct recent_entry *e; 406 struct recent_entry *e;
408 char buf[sizeof("+255.255.255.255")], *c = buf; 407 char buf[sizeof("+255.255.255.255")], *c = buf;
409 u_int32_t addr; 408 __be32 addr;
410 int add; 409 int add;
411 410
412 if (size > sizeof(buf)) 411 if (size > sizeof(buf))
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 79336cb42527..b91f3582359b 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -131,7 +131,7 @@ ipt_local_hook(unsigned int hook,
131{ 131{
132 unsigned int ret; 132 unsigned int ret;
133 u_int8_t tos; 133 u_int8_t tos;
134 u_int32_t saddr, daddr; 134 __be32 saddr, daddr;
135 unsigned long nfmark; 135 unsigned long nfmark;
136 136
137 /* root is playing with raw sockets. */ 137 /* root is playing with raw sockets. */
@@ -157,7 +157,8 @@ ipt_local_hook(unsigned int hook,
157 || (*pskb)->nfmark != nfmark 157 || (*pskb)->nfmark != nfmark
158#endif 158#endif
159 || (*pskb)->nh.iph->tos != tos)) 159 || (*pskb)->nh.iph->tos != tos))
160 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 160 if (ip_route_me_harder(pskb, RTN_UNSPEC))
161 ret = NF_DROP;
161 162
162 return ret; 163 return ret;
163} 164}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 0e935b4c8741..b430cf2a4f66 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -381,8 +381,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
381 struct ipcm_cookie ipc; 381 struct ipcm_cookie ipc;
382 struct rtable *rt = NULL; 382 struct rtable *rt = NULL;
383 int free = 0; 383 int free = 0;
384 u32 daddr; 384 __be32 daddr;
385 u32 saddr; 385 __be32 saddr;
386 u8 tos; 386 u8 tos;
387 int err; 387 int err;
388 388
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 20ffe8e88c0f..925ee4dfc32c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -261,6 +261,10 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr)
261 & rt_hash_mask); 261 & rt_hash_mask);
262} 262}
263 263
264#define rt_hash(daddr, saddr, idx) \
265 rt_hash_code((__force u32)(__be32)(daddr),\
266 (__force u32)(__be32)(saddr) ^ ((idx) << 5))
267
264#ifdef CONFIG_PROC_FS 268#ifdef CONFIG_PROC_FS
265struct rt_cache_iter_state { 269struct rt_cache_iter_state {
266 int bucket; 270 int bucket;
@@ -562,9 +566,15 @@ static inline u32 rt_score(struct rtable *rt)
562 566
563static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 567static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
564{ 568{
565 return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 && 569 return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
566 fl1->oif == fl2->oif && 570 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
567 fl1->iif == fl2->iif; 571#ifdef CONFIG_IP_ROUTE_FWMARK
572 (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) |
573#endif
574 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
575 *(u16 *)&fl2->nl_u.ip4_u.tos) |
576 (fl1->oif ^ fl2->oif) |
577 (fl1->iif ^ fl2->iif)) == 0;
568} 578}
569 579
570#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED 580#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
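
Two route.c changes travel together here: the (daddr, saddr, ifindex) hashing is centralized in the new rt_hash() macro, which does the __force unwrapping and the "<< 5" mixing in one place, and compare_keys() is rewritten from a memcmp() over the whole key into a branchless OR of per-field XORs, with tos and the adjacent scope byte compared as one 16-bit load. A hedged sketch of the XOR/OR comparison on a simplified key struct (the field set is illustrative):

/* Branchless key comparison in the style of the rewritten compare_keys():
 * XOR each pair of fields, OR the results, and test the accumulated value
 * against zero.  The key layout here is a simplified stand-in. */
#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint32_t daddr;
	uint32_t saddr;
	int      oif;
	int      iif;
	uint8_t  tos;
	uint8_t  scope;
};

static int keys_equal(const struct flow_key *a, const struct flow_key *b)
{
	return ((a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr) |
		(uint32_t)(a->oif ^ b->oif) |
		(uint32_t)(a->iif ^ b->iif) |
		(uint32_t)(a->tos ^ b->tos) |
		(uint32_t)(a->scope ^ b->scope)) == 0;
}

int main(void)
{
	struct flow_key a = { 0x0a000001, 0x0a000002, 2, 0, 0x10, 0 };
	struct flow_key b = a;

	printf("equal: %d\n", keys_equal(&a, &b));	/* 1 */
	b.tos = 0;
	printf("equal: %d\n", keys_equal(&a, &b));	/* 0 */
	return 0;
}

The OR of XORs still evaluates every field, but it avoids a branch per field and does not compare padding or unrelated bytes the way a struct-wide memcmp() would.
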
@@ -1074,7 +1084,7 @@ static void ip_select_fb_ident(struct iphdr *iph)
1074 u32 salt; 1084 u32 salt;
1075 1085
1076 spin_lock_bh(&ip_fb_id_lock); 1086 spin_lock_bh(&ip_fb_id_lock);
1077 salt = secure_ip_id(ip_fallback_id ^ iph->daddr); 1087 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1078 iph->id = htons(salt & 0xFFFF); 1088 iph->id = htons(salt & 0xFFFF);
1079 ip_fallback_id = salt; 1089 ip_fallback_id = salt;
1080 spin_unlock_bh(&ip_fb_id_lock); 1090 spin_unlock_bh(&ip_fb_id_lock);
@@ -1118,13 +1128,13 @@ static void rt_del(unsigned hash, struct rtable *rt)
1118 spin_unlock_bh(rt_hash_lock_addr(hash)); 1128 spin_unlock_bh(rt_hash_lock_addr(hash));
1119} 1129}
1120 1130
1121void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw, 1131void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1122 u32 saddr, struct net_device *dev) 1132 __be32 saddr, struct net_device *dev)
1123{ 1133{
1124 int i, k; 1134 int i, k;
1125 struct in_device *in_dev = in_dev_get(dev); 1135 struct in_device *in_dev = in_dev_get(dev);
1126 struct rtable *rth, **rthp; 1136 struct rtable *rth, **rthp;
1127 u32 skeys[2] = { saddr, 0 }; 1137 __be32 skeys[2] = { saddr, 0 };
1128 int ikeys[2] = { dev->ifindex, 0 }; 1138 int ikeys[2] = { dev->ifindex, 0 };
1129 struct netevent_redirect netevent; 1139 struct netevent_redirect netevent;
1130 1140
@@ -1147,8 +1157,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1147 1157
1148 for (i = 0; i < 2; i++) { 1158 for (i = 0; i < 2; i++) {
1149 for (k = 0; k < 2; k++) { 1159 for (k = 0; k < 2; k++) {
1150 unsigned hash = rt_hash_code(daddr, 1160 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
1151 skeys[i] ^ (ikeys[k] << 5));
1152 1161
1153 rthp=&rt_hash_table[hash].chain; 1162 rthp=&rt_hash_table[hash].chain;
1154 1163
@@ -1260,9 +1269,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1260 ret = NULL; 1269 ret = NULL;
1261 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1270 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1262 rt->u.dst.expires) { 1271 rt->u.dst.expires) {
1263 unsigned hash = rt_hash_code(rt->fl.fl4_dst, 1272 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1264 rt->fl.fl4_src ^ 1273 rt->fl.oif);
1265 (rt->fl.oif << 5));
1266#if RT_CACHE_DEBUG >= 1 1274#if RT_CACHE_DEBUG >= 1
1267 printk(KERN_DEBUG "ip_rt_advice: redirect to " 1275 printk(KERN_DEBUG "ip_rt_advice: redirect to "
1268 "%u.%u.%u.%u/%02x dropped\n", 1276 "%u.%u.%u.%u/%02x dropped\n",
@@ -1397,15 +1405,15 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1397 int i; 1405 int i;
1398 unsigned short old_mtu = ntohs(iph->tot_len); 1406 unsigned short old_mtu = ntohs(iph->tot_len);
1399 struct rtable *rth; 1407 struct rtable *rth;
1400 u32 skeys[2] = { iph->saddr, 0, }; 1408 __be32 skeys[2] = { iph->saddr, 0, };
1401 u32 daddr = iph->daddr; 1409 __be32 daddr = iph->daddr;
1402 unsigned short est_mtu = 0; 1410 unsigned short est_mtu = 0;
1403 1411
1404 if (ipv4_config.no_pmtu_disc) 1412 if (ipv4_config.no_pmtu_disc)
1405 return 0; 1413 return 0;
1406 1414
1407 for (i = 0; i < 2; i++) { 1415 for (i = 0; i < 2; i++) {
1408 unsigned hash = rt_hash_code(daddr, skeys[i]); 1416 unsigned hash = rt_hash(daddr, skeys[i], 0);
1409 1417
1410 rcu_read_lock(); 1418 rcu_read_lock();
1411 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1419 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1530,7 +1538,7 @@ static int ip_rt_bug(struct sk_buff *skb)
1530 1538
1531void ip_rt_get_source(u8 *addr, struct rtable *rt) 1539void ip_rt_get_source(u8 *addr, struct rtable *rt)
1532{ 1540{
1533 u32 src; 1541 __be32 src;
1534 struct fib_result res; 1542 struct fib_result res;
1535 1543
1536 if (rt->fl.iif == 0) 1544 if (rt->fl.iif == 0)
@@ -1596,12 +1604,12 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1596 rt->rt_type = res->type; 1604 rt->rt_type = res->type;
1597} 1605}
1598 1606
1599static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr, 1607static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1600 u8 tos, struct net_device *dev, int our) 1608 u8 tos, struct net_device *dev, int our)
1601{ 1609{
1602 unsigned hash; 1610 unsigned hash;
1603 struct rtable *rth; 1611 struct rtable *rth;
1604 u32 spec_dst; 1612 __be32 spec_dst;
1605 struct in_device *in_dev = in_dev_get(dev); 1613 struct in_device *in_dev = in_dev_get(dev);
1606 u32 itag = 0; 1614 u32 itag = 0;
1607 1615
@@ -1665,7 +1673,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
1665 RT_CACHE_STAT_INC(in_slow_mc); 1673 RT_CACHE_STAT_INC(in_slow_mc);
1666 1674
1667 in_dev_put(in_dev); 1675 in_dev_put(in_dev);
1668 hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5)); 1676 hash = rt_hash(daddr, saddr, dev->ifindex);
1669 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst); 1677 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1670 1678
1671e_nobufs: 1679e_nobufs:
@@ -1681,8 +1689,8 @@ e_inval:
1681static void ip_handle_martian_source(struct net_device *dev, 1689static void ip_handle_martian_source(struct net_device *dev,
1682 struct in_device *in_dev, 1690 struct in_device *in_dev,
1683 struct sk_buff *skb, 1691 struct sk_buff *skb,
1684 u32 daddr, 1692 __be32 daddr,
1685 u32 saddr) 1693 __be32 saddr)
1686{ 1694{
1687 RT_CACHE_STAT_INC(in_martian_src); 1695 RT_CACHE_STAT_INC(in_martian_src);
1688#ifdef CONFIG_IP_ROUTE_VERBOSE 1696#ifdef CONFIG_IP_ROUTE_VERBOSE
@@ -1712,7 +1720,7 @@ static void ip_handle_martian_source(struct net_device *dev,
1712static inline int __mkroute_input(struct sk_buff *skb, 1720static inline int __mkroute_input(struct sk_buff *skb,
1713 struct fib_result* res, 1721 struct fib_result* res,
1714 struct in_device *in_dev, 1722 struct in_device *in_dev,
1715 u32 daddr, u32 saddr, u32 tos, 1723 __be32 daddr, __be32 saddr, u32 tos,
1716 struct rtable **result) 1724 struct rtable **result)
1717{ 1725{
1718 1726
@@ -1720,7 +1728,8 @@ static inline int __mkroute_input(struct sk_buff *skb,
1720 int err; 1728 int err;
1721 struct in_device *out_dev; 1729 struct in_device *out_dev;
1722 unsigned flags = 0; 1730 unsigned flags = 0;
1723 u32 spec_dst, itag; 1731 __be32 spec_dst;
1732 u32 itag;
1724 1733
1725 /* get a working reference to the output device */ 1734 /* get a working reference to the output device */
1726 out_dev = in_dev_get(FIB_RES_DEV(*res)); 1735 out_dev = in_dev_get(FIB_RES_DEV(*res));
@@ -1813,7 +1822,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
1813 struct fib_result* res, 1822 struct fib_result* res,
1814 const struct flowi *fl, 1823 const struct flowi *fl,
1815 struct in_device *in_dev, 1824 struct in_device *in_dev,
1816 u32 daddr, u32 saddr, u32 tos) 1825 __be32 daddr, __be32 saddr, u32 tos)
1817{ 1826{
1818 struct rtable* rth = NULL; 1827 struct rtable* rth = NULL;
1819 int err; 1828 int err;
@@ -1830,7 +1839,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
1830 return err; 1839 return err;
1831 1840
1832 /* put it into the cache */ 1841 /* put it into the cache */
1833 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5)); 1842 hash = rt_hash(daddr, saddr, fl->iif);
1834 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 1843 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1835} 1844}
1836 1845
@@ -1838,7 +1847,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1838 struct fib_result* res, 1847 struct fib_result* res,
1839 const struct flowi *fl, 1848 const struct flowi *fl,
1840 struct in_device *in_dev, 1849 struct in_device *in_dev,
1841 u32 daddr, u32 saddr, u32 tos) 1850 __be32 daddr, __be32 saddr, u32 tos)
1842{ 1851{
1843#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED 1852#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
1844 struct rtable* rth = NULL, *rtres; 1853 struct rtable* rth = NULL, *rtres;
@@ -1871,7 +1880,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1871 return err; 1880 return err;
1872 1881
1873 /* put it into the cache */ 1882 /* put it into the cache */
1874 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5)); 1883 hash = rt_hash(daddr, saddr, fl->iif);
1875 err = rt_intern_hash(hash, rth, &rtres); 1884 err = rt_intern_hash(hash, rth, &rtres);
1876 if (err) 1885 if (err)
1877 return err; 1886 return err;
@@ -1901,7 +1910,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1901 * 2. IP spoofing attempts are filtered with 100% of guarantee. 1910 * 2. IP spoofing attempts are filtered with 100% of guarantee.
1902 */ 1911 */
1903 1912
1904static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr, 1913static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1905 u8 tos, struct net_device *dev) 1914 u8 tos, struct net_device *dev)
1906{ 1915{
1907 struct fib_result res; 1916 struct fib_result res;
@@ -1920,7 +1929,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
1920 u32 itag = 0; 1929 u32 itag = 0;
1921 struct rtable * rth; 1930 struct rtable * rth;
1922 unsigned hash; 1931 unsigned hash;
1923 u32 spec_dst; 1932 __be32 spec_dst;
1924 int err = -EINVAL; 1933 int err = -EINVAL;
1925 int free_res = 0; 1934 int free_res = 0;
1926 1935
@@ -1936,7 +1945,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
1936 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr)) 1945 if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
1937 goto martian_source; 1946 goto martian_source;
1938 1947
1939 if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0)) 1948 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
1940 goto brd_input; 1949 goto brd_input;
1941 1950
1942 /* Accept zero addresses only to limited broadcast; 1951 /* Accept zero addresses only to limited broadcast;
@@ -2048,7 +2057,7 @@ local_input:
2048 rth->rt_flags &= ~RTCF_LOCAL; 2057 rth->rt_flags &= ~RTCF_LOCAL;
2049 } 2058 }
2050 rth->rt_type = res.type; 2059 rth->rt_type = res.type;
2051 hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5)); 2060 hash = rt_hash(daddr, saddr, fl.iif);
2052 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 2061 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
2053 goto done; 2062 goto done;
2054 2063
@@ -2087,7 +2096,7 @@ martian_source:
2087 goto e_inval; 2096 goto e_inval;
2088} 2097}
2089 2098
2090int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr, 2099int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2091 u8 tos, struct net_device *dev) 2100 u8 tos, struct net_device *dev)
2092{ 2101{
2093 struct rtable * rth; 2102 struct rtable * rth;
@@ -2095,7 +2104,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
2095 int iif = dev->ifindex; 2104 int iif = dev->ifindex;
2096 2105
2097 tos &= IPTOS_RT_MASK; 2106 tos &= IPTOS_RT_MASK;
2098 hash = rt_hash_code(daddr, saddr ^ (iif << 5)); 2107 hash = rt_hash(daddr, saddr, iif);
2099 2108
2100 rcu_read_lock(); 2109 rcu_read_lock();
2101 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2110 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2169,7 +2178,7 @@ static inline int __mkroute_output(struct rtable **result,
2169 if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK)) 2178 if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2170 return -EINVAL; 2179 return -EINVAL;
2171 2180
2172 if (fl->fl4_dst == 0xFFFFFFFF) 2181 if (fl->fl4_dst == htonl(0xFFFFFFFF))
2173 res->type = RTN_BROADCAST; 2182 res->type = RTN_BROADCAST;
2174 else if (MULTICAST(fl->fl4_dst)) 2183 else if (MULTICAST(fl->fl4_dst))
2175 res->type = RTN_MULTICAST; 2184 res->type = RTN_MULTICAST;
@@ -2293,8 +2302,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
2293 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); 2302 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2294 unsigned hash; 2303 unsigned hash;
2295 if (err == 0) { 2304 if (err == 0) {
2296 hash = rt_hash_code(oldflp->fl4_dst, 2305 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2297 oldflp->fl4_src ^ (oldflp->oif << 5));
2298 err = rt_intern_hash(hash, rth, rp); 2306 err = rt_intern_hash(hash, rth, rp);
2299 } 2307 }
2300 2308
@@ -2336,9 +2344,8 @@ static inline int ip_mkroute_output(struct rtable** rp,
2336 if (err != 0) 2344 if (err != 0)
2337 goto cleanup; 2345 goto cleanup;
2338 2346
2339 hash = rt_hash_code(oldflp->fl4_dst, 2347 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src,
2340 oldflp->fl4_src ^ 2348 oldflp->oif);
2341 (oldflp->oif << 5));
2342 err = rt_intern_hash(hash, rth, rp); 2349 err = rt_intern_hash(hash, rth, rp);
2343 2350
2344 /* forward hop information to multipath impl. */ 2351 /* forward hop information to multipath impl. */
@@ -2417,7 +2424,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2417 */ 2424 */
2418 2425
2419 if (oldflp->oif == 0 2426 if (oldflp->oif == 0
2420 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) { 2427 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2421 /* Special hack: user can direct multicasts 2428 /* Special hack: user can direct multicasts
2422 and limited broadcast via necessary interface 2429 and limited broadcast via necessary interface
2423 without fiddling with IP_MULTICAST_IF or IP_PKTINFO. 2430 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
@@ -2454,7 +2461,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2454 goto out; /* Wrong error code */ 2461 goto out; /* Wrong error code */
2455 } 2462 }
2456 2463
2457 if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) { 2464 if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2458 if (!fl.fl4_src) 2465 if (!fl.fl4_src)
2459 fl.fl4_src = inet_select_addr(dev_out, 0, 2466 fl.fl4_src = inet_select_addr(dev_out, 0,
2460 RT_SCOPE_LINK); 2467 RT_SCOPE_LINK);
@@ -2567,7 +2574,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2567 unsigned hash; 2574 unsigned hash;
2568 struct rtable *rth; 2575 struct rtable *rth;
2569 2576
2570 hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5)); 2577 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
2571 2578
2572 rcu_read_lock_bh(); 2579 rcu_read_lock_bh();
2573 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2580 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2660,11 +2667,11 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2660 if (rt->rt_flags & RTCF_NOTIFY) 2667 if (rt->rt_flags & RTCF_NOTIFY)
2661 r->rtm_flags |= RTM_F_NOTIFY; 2668 r->rtm_flags |= RTM_F_NOTIFY;
2662 2669
2663 NLA_PUT_U32(skb, RTA_DST, rt->rt_dst); 2670 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2664 2671
2665 if (rt->fl.fl4_src) { 2672 if (rt->fl.fl4_src) {
2666 r->rtm_src_len = 32; 2673 r->rtm_src_len = 32;
2667 NLA_PUT_U32(skb, RTA_SRC, rt->fl.fl4_src); 2674 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2668 } 2675 }
2669 if (rt->u.dst.dev) 2676 if (rt->u.dst.dev)
2670 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex); 2677 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
@@ -2677,12 +2684,12 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2677 NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg); 2684 NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg);
2678#endif 2685#endif
2679 if (rt->fl.iif) 2686 if (rt->fl.iif)
2680 NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2687 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2681 else if (rt->rt_src != rt->fl.fl4_src) 2688 else if (rt->rt_src != rt->fl.fl4_src)
2682 NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_src); 2689 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2683 2690
2684 if (rt->rt_dst != rt->rt_gateway) 2691 if (rt->rt_dst != rt->rt_gateway)
2685 NLA_PUT_U32(skb, RTA_GATEWAY, rt->rt_gateway); 2692 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2686 2693
2687 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) 2694 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2688 goto nla_put_failure; 2695 goto nla_put_failure;
@@ -2706,7 +2713,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2706 2713
2707 if (rt->fl.iif) { 2714 if (rt->fl.iif) {
2708#ifdef CONFIG_IP_MROUTE 2715#ifdef CONFIG_IP_MROUTE
2709 u32 dst = rt->rt_dst; 2716 __be32 dst = rt->rt_dst;
2710 2717
2711 if (MULTICAST(dst) && !LOCAL_MCAST(dst) && 2718 if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
2712 ipv4_devconf.mc_forwarding) { 2719 ipv4_devconf.mc_forwarding) {
@@ -2740,7 +2747,9 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2740 struct rtmsg *rtm; 2747 struct rtmsg *rtm;
2741 struct nlattr *tb[RTA_MAX+1]; 2748 struct nlattr *tb[RTA_MAX+1];
2742 struct rtable *rt = NULL; 2749 struct rtable *rt = NULL;
2743 u32 dst, src, iif; 2750 __be32 dst = 0;
2751 __be32 src = 0;
2752 u32 iif;
2744 int err; 2753 int err;
2745 struct sk_buff *skb; 2754 struct sk_buff *skb;
2746 2755
@@ -2765,8 +2774,8 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2765 skb->nh.iph->protocol = IPPROTO_ICMP; 2774 skb->nh.iph->protocol = IPPROTO_ICMP;
2766 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); 2775 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2767 2776
2768 src = tb[RTA_SRC] ? nla_get_u32(tb[RTA_SRC]) : 0; 2777 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2769 dst = tb[RTA_DST] ? nla_get_u32(tb[RTA_DST]) : 0; 2778 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2770 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 2779 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2771 2780
2772 if (iif) { 2781 if (iif) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b3def0df14fb..cf06accbe687 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -935,7 +935,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
935 const struct inet_connection_sock *icsk = inet_csk(sk); 935 const struct inet_connection_sock *icsk = inet_csk(sk);
936 struct tcp_sock *tp = tcp_sk(sk); 936 struct tcp_sock *tp = tcp_sk(sk);
937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; 937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
938 struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2); 938 struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
939 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; 939 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
940 int reord = tp->packets_out; 940 int reord = tp->packets_out;
941 int prior_fackets; 941 int prior_fackets;
@@ -2239,13 +2239,12 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
2239 return acked; 2239 return acked;
2240} 2240}
2241 2241
2242static u32 tcp_usrtt(const struct sk_buff *skb) 2242static u32 tcp_usrtt(struct timeval *tv)
2243{ 2243{
2244 struct timeval tv, now; 2244 struct timeval now;
2245 2245
2246 do_gettimeofday(&now); 2246 do_gettimeofday(&now);
2247 skb_get_timestamp(skb, &tv); 2247 return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec);
2248 return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
2249} 2248}
2250 2249
2251/* Remove acknowledged frames from the retransmission queue. */ 2250/* Remove acknowledged frames from the retransmission queue. */
@@ -2260,6 +2259,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2260 u32 pkts_acked = 0; 2259 u32 pkts_acked = 0;
2261 void (*rtt_sample)(struct sock *sk, u32 usrtt) 2260 void (*rtt_sample)(struct sock *sk, u32 usrtt)
2262 = icsk->icsk_ca_ops->rtt_sample; 2261 = icsk->icsk_ca_ops->rtt_sample;
2262 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
2263 2263
2264 while ((skb = skb_peek(&sk->sk_write_queue)) && 2264 while ((skb = skb_peek(&sk->sk_write_queue)) &&
2265 skb != sk->sk_send_head) { 2265 skb != sk->sk_send_head) {
@@ -2308,8 +2308,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2308 seq_rtt = -1; 2308 seq_rtt = -1;
2309 } else if (seq_rtt < 0) { 2309 } else if (seq_rtt < 0) {
2310 seq_rtt = now - scb->when; 2310 seq_rtt = now - scb->when;
2311 if (rtt_sample) 2311 skb_get_timestamp(skb, &tv);
2312 (*rtt_sample)(sk, tcp_usrtt(skb));
2313 } 2312 }
2314 if (sacked & TCPCB_SACKED_ACKED) 2313 if (sacked & TCPCB_SACKED_ACKED)
2315 tp->sacked_out -= tcp_skb_pcount(skb); 2314 tp->sacked_out -= tcp_skb_pcount(skb);
@@ -2322,8 +2321,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2322 } 2321 }
2323 } else if (seq_rtt < 0) { 2322 } else if (seq_rtt < 0) {
2324 seq_rtt = now - scb->when; 2323 seq_rtt = now - scb->when;
2325 if (rtt_sample) 2324 skb_get_timestamp(skb, &tv);
2326 (*rtt_sample)(sk, tcp_usrtt(skb));
2327 } 2325 }
2328 tcp_dec_pcount_approx(&tp->fackets_out, skb); 2326 tcp_dec_pcount_approx(&tp->fackets_out, skb);
2329 tcp_packets_out_dec(tp, skb); 2327 tcp_packets_out_dec(tp, skb);
@@ -2335,6 +2333,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2335 if (acked&FLAG_ACKED) { 2333 if (acked&FLAG_ACKED) {
2336 tcp_ack_update_rtt(sk, acked, seq_rtt); 2334 tcp_ack_update_rtt(sk, acked, seq_rtt);
2337 tcp_ack_packets_out(sk, tp); 2335 tcp_ack_packets_out(sk, tp);
2336 if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
2337 (*rtt_sample)(sk, tcp_usrtt(&tv));
2338 2338
2339 if (icsk->icsk_ca_ops->pkts_acked) 2339 if (icsk->icsk_ca_ops->pkts_acked)
2340 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked); 2340 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
@@ -2629,7 +2629,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2629 switch(opcode) { 2629 switch(opcode) {
2630 case TCPOPT_MSS: 2630 case TCPOPT_MSS:
2631 if(opsize==TCPOLEN_MSS && th->syn && !estab) { 2631 if(opsize==TCPOLEN_MSS && th->syn && !estab) {
2632 u16 in_mss = ntohs(get_unaligned((__u16 *)ptr)); 2632 u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
2633 if (in_mss) { 2633 if (in_mss) {
2634 if (opt_rx->user_mss && opt_rx->user_mss < in_mss) 2634 if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
2635 in_mss = opt_rx->user_mss; 2635 in_mss = opt_rx->user_mss;
@@ -2657,8 +2657,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2657 if ((estab && opt_rx->tstamp_ok) || 2657 if ((estab && opt_rx->tstamp_ok) ||
2658 (!estab && sysctl_tcp_timestamps)) { 2658 (!estab && sysctl_tcp_timestamps)) {
2659 opt_rx->saw_tstamp = 1; 2659 opt_rx->saw_tstamp = 1;
2660 opt_rx->rcv_tsval = ntohl(get_unaligned((__u32 *)ptr)); 2660 opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
2661 opt_rx->rcv_tsecr = ntohl(get_unaligned((__u32 *)(ptr+4))); 2661 opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
2662 } 2662 }
2663 } 2663 }
2664 break; 2664 break;
@@ -2695,8 +2695,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
2695 return 0; 2695 return 0;
2696 } else if (tp->rx_opt.tstamp_ok && 2696 } else if (tp->rx_opt.tstamp_ok &&
2697 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 2697 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
2698 __u32 *ptr = (__u32 *)(th + 1); 2698 __be32 *ptr = (__be32 *)(th + 1);
2699 if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 2699 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
2700 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 2700 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
2701 tp->rx_opt.saw_tstamp = 1; 2701 tp->rx_opt.saw_tstamp = 1;
2702 ++ptr; 2702 ++ptr;
@@ -3911,10 +3911,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3911 3911
3912 /* Check timestamp */ 3912 /* Check timestamp */
3913 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 3913 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
3914 __u32 *ptr = (__u32 *)(th + 1); 3914 __be32 *ptr = (__be32 *)(th + 1);
3915 3915
3916 /* No? Slow path! */ 3916 /* No? Slow path! */
3917 if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3917 if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3918 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) 3918 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
3919 goto slow_path; 3919 goto slow_path;
3920 3920
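
In tcp_input.c the per-skb RTT sampling moves out of the loop: skb_get_timestamp() just records the send time of a newly ACKed segment into a local timeval, and the ca_ops->rtt_sample() hook now runs once per ACK, and only when no retransmitted data was ACKed (FLAG_RETRANS_DATA_ACKED), so retransmissions no longer feed bogus samples. The microsecond delta itself is straightforward; a stand-alone sketch with gettimeofday() standing in for the kernel's skb timestamps:

/* Microsecond RTT delta in the style of the reworked tcp_usrtt(): the
 * difference between "now" and a timestamp taken when the segment was sent. */
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static unsigned int usrtt(const struct timeval *sent)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (now.tv_sec - sent->tv_sec) * 1000000 +
	       (now.tv_usec - sent->tv_usec);
}

int main(void)
{
	struct timeval sent;

	gettimeofday(&sent, NULL);	/* pretend a segment left here */
	usleep(2000);			/* ... and its ACK arrived ~2 ms later */
	printf("sampled rtt: %u us\n", usrtt(&sent));
	return 0;
}
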
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 39b179856082..22ef8bd26620 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -159,7 +159,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
159 struct tcp_sock *tp = tcp_sk(sk); 159 struct tcp_sock *tp = tcp_sk(sk);
160 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; 160 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
161 struct rtable *rt; 161 struct rtable *rt;
162 u32 daddr, nexthop; 162 __be32 daddr, nexthop;
163 int tmp; 163 int tmp;
164 int err; 164 int err;
165 165
@@ -355,7 +355,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
355 return; 355 return;
356 } 356 }
357 if (sk->sk_state == TCP_TIME_WAIT) { 357 if (sk->sk_state == TCP_TIME_WAIT) {
358 inet_twsk_put((struct inet_timewait_sock *)sk); 358 inet_twsk_put(inet_twsk(sk));
359 return; 359 return;
360 } 360 }
361 361
@@ -373,7 +373,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
373 seq = ntohl(th->seq); 373 seq = ntohl(th->seq);
374 if (sk->sk_state != TCP_LISTEN && 374 if (sk->sk_state != TCP_LISTEN &&
375 !between(seq, tp->snd_una, tp->snd_nxt)) { 375 !between(seq, tp->snd_una, tp->snd_nxt)) {
376 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS); 376 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
377 goto out; 377 goto out;
378 } 378 }
379 379
@@ -578,7 +578,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
578 struct tcphdr *th = skb->h.th; 578 struct tcphdr *th = skb->h.th;
579 struct { 579 struct {
580 struct tcphdr th; 580 struct tcphdr th;
581 u32 tsopt[3]; 581 u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
582 } rep; 582 } rep;
583 struct ip_reply_arg arg; 583 struct ip_reply_arg arg;
584 584
@@ -734,8 +734,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
734 struct inet_request_sock *ireq; 734 struct inet_request_sock *ireq;
735 struct tcp_options_received tmp_opt; 735 struct tcp_options_received tmp_opt;
736 struct request_sock *req; 736 struct request_sock *req;
737 __u32 saddr = skb->nh.iph->saddr; 737 __be32 saddr = skb->nh.iph->saddr;
738 __u32 daddr = skb->nh.iph->daddr; 738 __be32 daddr = skb->nh.iph->daddr;
739 __u32 isn = TCP_SKB_CB(skb)->when; 739 __u32 isn = TCP_SKB_CB(skb)->when;
740 struct dst_entry *dst = NULL; 740 struct dst_entry *dst = NULL;
741#ifdef CONFIG_SYN_COOKIES 741#ifdef CONFIG_SYN_COOKIES
@@ -960,7 +960,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
960 bh_lock_sock(nsk); 960 bh_lock_sock(nsk);
961 return nsk; 961 return nsk;
962 } 962 }
963 inet_twsk_put((struct inet_timewait_sock *)nsk); 963 inet_twsk_put(inet_twsk(nsk));
964 return NULL; 964 return NULL;
965 } 965 }
966 966
@@ -1154,26 +1154,24 @@ discard_and_relse:
1154 1154
1155do_time_wait: 1155do_time_wait:
1156 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1156 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1157 inet_twsk_put((struct inet_timewait_sock *) sk); 1157 inet_twsk_put(inet_twsk(sk));
1158 goto discard_it; 1158 goto discard_it;
1159 } 1159 }
1160 1160
1161 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1161 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1162 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1162 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1163 inet_twsk_put((struct inet_timewait_sock *) sk); 1163 inet_twsk_put(inet_twsk(sk));
1164 goto discard_it; 1164 goto discard_it;
1165 } 1165 }
1166 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1166 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1167 skb, th)) {
1168 case TCP_TW_SYN: { 1167 case TCP_TW_SYN: {
1169 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1168 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1170 skb->nh.iph->daddr, 1169 skb->nh.iph->daddr,
1171 th->dest, 1170 th->dest,
1172 inet_iif(skb)); 1171 inet_iif(skb));
1173 if (sk2) { 1172 if (sk2) {
1174 inet_twsk_deschedule((struct inet_timewait_sock *)sk, 1173 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1175 &tcp_death_row); 1174 inet_twsk_put(inet_twsk(sk));
1176 inet_twsk_put((struct inet_timewait_sock *)sk);
1177 sk = sk2; 1175 sk = sk2;
1178 goto process; 1176 goto process;
1179 } 1177 }
@@ -1763,7 +1761,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
1763 1761
1764static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i) 1762static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
1765{ 1763{
1766 unsigned int dest, src; 1764 __be32 dest, src;
1767 __u16 destp, srcp; 1765 __u16 destp, srcp;
1768 int ttd = tw->tw_ttd - jiffies; 1766 int ttd = tw->tw_ttd - jiffies;
1769 1767
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 308fb7e071c5..f0ebaf0e21cb 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -31,8 +31,6 @@
31 * Hung Hing Lun, Mike <hlhung3i@gmail.com> 31 * Hung Hing Lun, Mike <hlhung3i@gmail.com>
32 * SourceForge project page: 32 * SourceForge project page:
33 * http://tcp-lp-mod.sourceforge.net/ 33 * http://tcp-lp-mod.sourceforge.net/
34 *
35 * Version: $Id: tcp_lp.c,v 1.24 2006/09/05 20:22:53 hswong3i Exp $
36 */ 34 */
37 35
38#include <linux/module.h> 36#include <linux/module.h>
@@ -164,7 +162,7 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
164 162
165 out: 163 out:
166 /* record time for successful remote HZ calc */ 164 /* record time for successful remote HZ calc */
167 if (rhz > 0) 165 if ((rhz >> 6) > 0)
168 lp->flag |= LP_VALID_RHZ; 166 lp->flag |= LP_VALID_RHZ;
169 else 167 else
170 lp->flag &= ~LP_VALID_RHZ; 168 lp->flag &= ~LP_VALID_RHZ;
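
In tcp_lp the remote-HZ estimate rhz is carried in a shifted fixed-point form (scaled by 2^6), so a plain "rhz > 0" check could accept estimates that collapse to zero once unscaled; the new "(rhz >> 6) > 0" test validates the unscaled value instead. A tiny sketch of that kind of fixed-point guard (values made up):

/* Guarding a fixed-point estimate, as in the tcp_lp change: the value is
 * stored scaled by 64 (<< 6), so validity is judged on the unscaled value. */
#include <stdio.h>

#define FIX_SHIFT 6	/* estimate stored as value << 6 */

static int valid_estimate(unsigned int scaled)
{
	return (scaled >> FIX_SHIFT) > 0;	/* at least 1 after unscaling */
}

int main(void)
{
	unsigned int tiny = 40;			/* > 0, but 40 >> 6 == 0 */
	unsigned int sane = 100 << FIX_SHIFT;

	printf("tiny: %d, sane: %d\n",
	       valid_estimate(tiny), valid_estimate(sane));
	return 0;
}
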
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 061edfae0c29..ca406157724c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -269,14 +269,14 @@ static u16 tcp_select_window(struct sock *sk)
269 return new_win; 269 return new_win;
270} 270}
271 271
272static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, 272static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
273 __u32 tstamp) 273 __u32 tstamp)
274{ 274{
275 if (tp->rx_opt.tstamp_ok) { 275 if (tp->rx_opt.tstamp_ok) {
276 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | 276 *ptr++ = htonl((TCPOPT_NOP << 24) |
277 (TCPOPT_NOP << 16) | 277 (TCPOPT_NOP << 16) |
278 (TCPOPT_TIMESTAMP << 8) | 278 (TCPOPT_TIMESTAMP << 8) |
279 TCPOLEN_TIMESTAMP); 279 TCPOLEN_TIMESTAMP);
280 *ptr++ = htonl(tstamp); 280 *ptr++ = htonl(tstamp);
281 *ptr++ = htonl(tp->rx_opt.ts_recent); 281 *ptr++ = htonl(tp->rx_opt.ts_recent);
282 } 282 }
@@ -305,7 +305,7 @@ static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
305 * MAX_SYN_SIZE to match the new maximum number of options that you 305 * MAX_SYN_SIZE to match the new maximum number of options that you
306 * can generate. 306 * can generate.
307 */ 307 */
308static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack, 308static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
309 int offer_wscale, int wscale, __u32 tstamp, 309 int offer_wscale, int wscale, __u32 tstamp,
310 __u32 ts_recent) 310 __u32 ts_recent)
311{ 311{
@@ -325,18 +325,27 @@ static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
325 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); 325 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
326 if (ts) { 326 if (ts) {
327 if(sack) 327 if(sack)
328 *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) | 328 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
329 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 329 (TCPOLEN_SACK_PERM << 16) |
330 (TCPOPT_TIMESTAMP << 8) |
331 TCPOLEN_TIMESTAMP);
330 else 332 else
331 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 333 *ptr++ = htonl((TCPOPT_NOP << 24) |
332 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 334 (TCPOPT_NOP << 16) |
335 (TCPOPT_TIMESTAMP << 8) |
336 TCPOLEN_TIMESTAMP);
333 *ptr++ = htonl(tstamp); /* TSVAL */ 337 *ptr++ = htonl(tstamp); /* TSVAL */
334 *ptr++ = htonl(ts_recent); /* TSECR */ 338 *ptr++ = htonl(ts_recent); /* TSECR */
335 } else if(sack) 339 } else if(sack)
336 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 340 *ptr++ = htonl((TCPOPT_NOP << 24) |
337 (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM); 341 (TCPOPT_NOP << 16) |
342 (TCPOPT_SACK_PERM << 8) |
343 TCPOLEN_SACK_PERM);
338 if (offer_wscale) 344 if (offer_wscale)
339 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale)); 345 *ptr++ = htonl((TCPOPT_NOP << 24) |
346 (TCPOPT_WINDOW << 16) |
347 (TCPOLEN_WINDOW << 8) |
348 (wscale));
340} 349}
341 350
342/* This routine actually transmits TCP packets queued in by 351/* This routine actually transmits TCP packets queued in by
@@ -424,7 +433,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
424 th->dest = inet->dport; 433 th->dest = inet->dport;
425 th->seq = htonl(tcb->seq); 434 th->seq = htonl(tcb->seq);
426 th->ack_seq = htonl(tp->rcv_nxt); 435 th->ack_seq = htonl(tp->rcv_nxt);
427 *(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 436 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
428 tcb->flags); 437 tcb->flags);
429 438
430 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { 439 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
@@ -445,7 +454,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
445 } 454 }
446 455
447 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { 456 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
448 tcp_syn_build_options((__u32 *)(th + 1), 457 tcp_syn_build_options((__be32 *)(th + 1),
449 tcp_advertise_mss(sk), 458 tcp_advertise_mss(sk),
450 (sysctl_flags & SYSCTL_FLAG_TSTAMPS), 459 (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
451 (sysctl_flags & SYSCTL_FLAG_SACK), 460 (sysctl_flags & SYSCTL_FLAG_SACK),
@@ -454,7 +463,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
454 tcb->when, 463 tcb->when,
455 tp->rx_opt.ts_recent); 464 tp->rx_opt.ts_recent);
456 } else { 465 } else {
457 tcp_build_and_update_options((__u32 *)(th + 1), 466 tcp_build_and_update_options((__be32 *)(th + 1),
458 tp, tcb->when); 467 tp, tcb->when);
459 TCP_ECN_send(sk, tp, skb, tcp_header_size); 468 TCP_ECN_send(sk, tp, skb, tcp_header_size);
460 } 469 }
@@ -1087,10 +1096,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
1087 u32 send_win, cong_win, limit, in_flight; 1096 u32 send_win, cong_win, limit, in_flight;
1088 1097
1089 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1098 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1090 return 0; 1099 goto send_now;
1091 1100
1092 if (icsk->icsk_ca_state != TCP_CA_Open) 1101 if (icsk->icsk_ca_state != TCP_CA_Open)
1093 return 0; 1102 goto send_now;
1103
1104 /* Defer for less than two clock ticks. */
1105 if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1106 goto send_now;
1094 1107
1095 in_flight = tcp_packets_in_flight(tp); 1108 in_flight = tcp_packets_in_flight(tp);
1096 1109
@@ -1106,7 +1119,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
1106 1119
1107 /* If a full-sized TSO skb can be sent, do it. */ 1120 /* If a full-sized TSO skb can be sent, do it. */
1108 if (limit >= 65536) 1121 if (limit >= 65536)
1109 return 0; 1122 goto send_now;
1110 1123
1111 if (sysctl_tcp_tso_win_divisor) { 1124 if (sysctl_tcp_tso_win_divisor) {
1112 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1125 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1116,7 +1129,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
1116 */ 1129 */
1117 chunk /= sysctl_tcp_tso_win_divisor; 1130 chunk /= sysctl_tcp_tso_win_divisor;
1118 if (limit >= chunk) 1131 if (limit >= chunk)
1119 return 0; 1132 goto send_now;
1120 } else { 1133 } else {
1121 /* Different approach, try not to defer past a single 1134 /* Different approach, try not to defer past a single
1122 * ACK. Receiver should ACK every other full sized 1135 * ACK. Receiver should ACK every other full sized
@@ -1124,11 +1137,17 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
1124 * then send now. 1137 * then send now.
1125 */ 1138 */
1126 if (limit > tcp_max_burst(tp) * tp->mss_cache) 1139 if (limit > tcp_max_burst(tp) * tp->mss_cache)
1127 return 0; 1140 goto send_now;
1128 } 1141 }
1129 1142
1130 /* Ok, it looks like it is advisable to defer. */ 1143 /* Ok, it looks like it is advisable to defer. */
1144 tp->tso_deferred = 1 | (jiffies<<1);
1145
1131 return 1; 1146 return 1;
1147
1148send_now:
1149 tp->tso_deferred = 0;
1150 return 0;
1132} 1151}
1133 1152
1134/* Create a new MTU probe if we are ready. 1153/* Create a new MTU probe if we are ready.
@@ -2070,7 +2089,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2070 th->window = htons(req->rcv_wnd); 2089 th->window = htons(req->rcv_wnd);
2071 2090
2072 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2091 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2073 tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 2092 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2074 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 2093 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2075 TCP_SKB_CB(skb)->when, 2094 TCP_SKB_CB(skb)->when,
2076 req->ts_recent); 2095 req->ts_recent);
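
[Note on the tcp_tso_should_defer() change above] The patch packs two things into the single tp->tso_deferred word: bit 0 says "a deferral is pending" and the upper bits hold (jiffies << 1), the tick at which deferral started. The sketch below is a minimal userspace illustration of that encode/decode round-trip; the "jiffies" here is just a local counter standing in for the kernel's, and none of this is kernel code.

/*
 * Sketch of the one-word tso_deferred encoding: bit 0 is a
 * "deferral pending" flag, the upper bits hold (jiffies << 1).
 * "jiffies" is a plain local counter, not the kernel's.
 */
#include <stdio.h>

static unsigned long jiffies;

static unsigned long mark_deferred(void)
{
	return 1UL | (jiffies << 1);	/* as in tp->tso_deferred = 1 | (jiffies<<1) */
}

static unsigned long ticks_since(unsigned long deferred)
{
	/*
	 * (jiffies << 1) >> 1 clears the top bit so the value is in the
	 * same shifted form as what was stored; >> 1 drops the flag bit.
	 */
	return ((jiffies << 1) >> 1) - (deferred >> 1);
}

int main(void)
{
	unsigned long deferred = mark_deferred();

	for (int i = 0; i < 4; i++) {
		printf("tick %lu: deferred for %lu tick(s)\n",
		       jiffies, ticks_since(deferred));
		jiffies++;
	}
	return 0;
}
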
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index dab37d2f65fc..4be336f17883 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -99,8 +99,10 @@ static int jtcp_sendmsg(struct kiocb *iocb, struct sock *sk,
99} 99}
100 100
101static struct jprobe tcp_send_probe = { 101static struct jprobe tcp_send_probe = {
102 .kp = { .addr = (kprobe_opcode_t *) &tcp_sendmsg, }, 102 .kp = {
103 .entry = (kprobe_opcode_t *) &jtcp_sendmsg, 103 .symbol_name = "tcp_sendmsg",
104 },
105 .entry = JPROBE_ENTRY(jtcp_sendmsg),
104}; 106};
105 107
106 108
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 77e265d7bb8f..865d75214a9a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -243,8 +243,8 @@ static void udp_v4_unhash(struct sock *sk)
243/* UDP is nearly always wildcards out the wazoo, it makes no sense to try 243/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
244 * harder than this. -DaveM 244 * harder than this. -DaveM
245 */ 245 */
246static struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, 246static struct sock *udp_v4_lookup_longway(__be32 saddr, __be16 sport,
247 u32 daddr, u16 dport, int dif) 247 __be32 daddr, __be16 dport, int dif)
248{ 248{
249 struct sock *sk, *result = NULL; 249 struct sock *sk, *result = NULL;
250 struct hlist_node *node; 250 struct hlist_node *node;
@@ -288,8 +288,8 @@ static struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport,
288 return result; 288 return result;
289} 289}
290 290
291static __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport, 291static __inline__ struct sock *udp_v4_lookup(__be32 saddr, __be16 sport,
292 u32 daddr, u16 dport, int dif) 292 __be32 daddr, __be16 dport, int dif)
293{ 293{
294 struct sock *sk; 294 struct sock *sk;
295 295
@@ -302,8 +302,8 @@ static __inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport,
302} 302}
303 303
304static inline struct sock *udp_v4_mcast_next(struct sock *sk, 304static inline struct sock *udp_v4_mcast_next(struct sock *sk,
305 u16 loc_port, u32 loc_addr, 305 __be16 loc_port, __be32 loc_addr,
306 u16 rmt_port, u32 rmt_addr, 306 __be16 rmt_port, __be32 rmt_addr,
307 int dif) 307 int dif)
308{ 308{
309 struct hlist_node *node; 309 struct hlist_node *node;
@@ -498,7 +498,7 @@ out:
498} 498}
499 499
500 500
501static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr, unsigned long base) 501static unsigned short udp_check(struct udphdr *uh, int len, __be32 saddr, __be32 daddr, unsigned long base)
502{ 502{
503 return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base)); 503 return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
504} 504}
@@ -513,8 +513,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
513 struct rtable *rt = NULL; 513 struct rtable *rt = NULL;
514 int free = 0; 514 int free = 0;
515 int connected = 0; 515 int connected = 0;
516 u32 daddr, faddr, saddr; 516 __be32 daddr, faddr, saddr;
517 u16 dport; 517 __be16 dport;
518 u8 tos; 518 u8 tos;
519 int err; 519 int err;
520 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 520 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
@@ -675,6 +675,8 @@ do_append_data:
675 udp_flush_pending_frames(sk); 675 udp_flush_pending_frames(sk);
676 else if (!corkreq) 676 else if (!corkreq)
677 err = udp_push_pending_frames(sk, up); 677 err = udp_push_pending_frames(sk, up);
678 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
679 up->pending = 0;
678 release_sock(sk); 680 release_sock(sk);
679 681
680out: 682out:
@@ -931,7 +933,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
931 int iphlen, len; 933 int iphlen, len;
932 934
933 __u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr); 935 __u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr);
934 __u32 *udpdata32 = (__u32 *)udpdata; 936 __be32 *udpdata32 = (__be32 *)udpdata;
935 __u16 encap_type = up->encap_type; 937 __u16 encap_type = up->encap_type;
936 938
937 /* if we're overly short, let UDP handle it */ 939 /* if we're overly short, let UDP handle it */
@@ -1080,7 +1082,7 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
1080 * so we don't need to lock the hashes. 1082 * so we don't need to lock the hashes.
1081 */ 1083 */
1082static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh, 1084static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
1083 u32 saddr, u32 daddr) 1085 __be32 saddr, __be32 daddr)
1084{ 1086{
1085 struct sock *sk; 1087 struct sock *sk;
1086 int dif; 1088 int dif;
@@ -1121,7 +1123,7 @@ static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
1121 * including udp header and folding it to skb->csum. 1123 * including udp header and folding it to skb->csum.
1122 */ 1124 */
1123static void udp_checksum_init(struct sk_buff *skb, struct udphdr *uh, 1125static void udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
1124 unsigned short ulen, u32 saddr, u32 daddr) 1126 unsigned short ulen, __be32 saddr, __be32 daddr)
1125{ 1127{
1126 if (uh->check == 0) { 1128 if (uh->check == 0) {
1127 skb->ip_summed = CHECKSUM_UNNECESSARY; 1129 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1146,8 +1148,8 @@ int udp_rcv(struct sk_buff *skb)
1146 struct udphdr *uh; 1148 struct udphdr *uh;
1147 unsigned short ulen; 1149 unsigned short ulen;
1148 struct rtable *rt = (struct rtable*)skb->dst; 1150 struct rtable *rt = (struct rtable*)skb->dst;
1149 u32 saddr = skb->nh.iph->saddr; 1151 __be32 saddr = skb->nh.iph->saddr;
1150 u32 daddr = skb->nh.iph->daddr; 1152 __be32 daddr = skb->nh.iph->daddr;
1151 int len = skb->len; 1153 int len = skb->len;
1152 1154
1153 /* 1155 /*
@@ -1563,8 +1565,8 @@ void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
1563static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) 1565static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1564{ 1566{
1565 struct inet_sock *inet = inet_sk(sp); 1567 struct inet_sock *inet = inet_sk(sp);
1566 unsigned int dest = inet->daddr; 1568 __be32 dest = inet->daddr;
1567 unsigned int src = inet->rcv_saddr; 1569 __be32 src = inet->rcv_saddr;
1568 __u16 destp = ntohs(inet->dport); 1570 __u16 destp = ntohs(inet->dport);
1569 __u16 srcp = ntohs(inet->sport); 1571 __u16 srcp = ntohs(inet->sport);
1570 1572
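
[Note on the udp.c type changes above] Switching the address and port parameters from u32/u16 to __be32/__be16 does not change the generated code; the annotations let sparse (make C=1) catch places where host-order and network-order values are mixed. The userspace sketch below, with made-up values and no sparse annotations, shows the kind of bug those annotations are meant to flag: comparing an on-wire port against a host-order constant quietly fails on little-endian machines.

/*
 * Why byte-order annotations matter: compare a wire-format (big-endian)
 * UDP port against host-order constants.  Values are made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char wire[2] = { 0x00, 0x35 };	/* port 53 as it appears on the wire */
	uint16_t net_port;

	memcpy(&net_port, wire, sizeof(net_port));	/* a "__be16" value */

	/* Wrong on little-endian hosts: mixes byte orders. */
	printf("net_port == 53        -> %s\n",
	       net_port == 53 ? "match" : "no match");

	/* Correct on any host: convert the constant to network order. */
	printf("net_port == htons(53) -> %s\n",
	       net_port == htons(53) ? "match" : "no match");

	printf("ntohs(net_port) = %u\n", (unsigned)ntohs(net_port));
	return 0;
}
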
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 040e8475f295..8655d038364c 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -23,7 +23,7 @@ int xfrm4_rcv(struct sk_buff *skb)
23 23
24EXPORT_SYMBOL(xfrm4_rcv); 24EXPORT_SYMBOL(xfrm4_rcv);
25 25
26static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq) 26static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
27{ 27{
28 switch (nexthdr) { 28 switch (nexthdr) {
29 case IPPROTO_IPIP: 29 case IPPROTO_IPIP:
@@ -55,7 +55,7 @@ drop:
55int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) 55int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
56{ 56{
57 int err; 57 int err;
58 u32 spi, seq; 58 __be32 spi, seq;
59 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; 59 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
60 struct xfrm_state *x; 60 struct xfrm_state *x;
61 int xfrm_nr = 0; 61 int xfrm_nr = 0;
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
new file mode 100644
index 000000000000..89cf59ea7bbe
--- /dev/null
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -0,0 +1,139 @@
1/*
2 * xfrm4_mode_beet.c - BEET mode encapsulation for IPv4.
3 *
4 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
5 * Miika Komu <miika@iki.fi>
6 * Herbert Xu <herbert@gondor.apana.org.au>
7 * Abhinav Pathak <abhinav.pathak@hiit.fi>
8 * Jeff Ahrenholz <ahrenholz@gmail.com>
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/stringify.h>
16#include <net/dst.h>
17#include <net/ip.h>
18#include <net/xfrm.h>
19
20/* Add encapsulation header.
21 *
22 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
23 * The following fields in it shall be filled in by x->type->output:
24 * tot_len
25 * check
26 *
27 * On exit, skb->h will be set to the start of the payload to be processed
28 * by x->type->output and skb->nh will be set to the top IP header.
29 */
30static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
31{
32 struct iphdr *iph, *top_iph = NULL;
33 int hdrlen, optlen;
34
35 iph = skb->nh.iph;
36 skb->h.ipiph = iph;
37
38 hdrlen = 0;
39 optlen = iph->ihl * 4 - sizeof(*iph);
40 if (unlikely(optlen))
41 hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
42
43 skb->nh.raw = skb_push(skb, x->props.header_len + hdrlen);
44 top_iph = skb->nh.iph;
45 hdrlen = iph->ihl * 4 - optlen;
46 skb->h.raw += hdrlen;
47
48 memmove(top_iph, iph, hdrlen);
49 if (unlikely(optlen)) {
50 struct ip_beet_phdr *ph;
51
52 BUG_ON(optlen < 0);
53
54 ph = (struct ip_beet_phdr *)skb->h.raw;
55 ph->padlen = 4 - (optlen & 4);
56 ph->hdrlen = (optlen + ph->padlen + sizeof(*ph)) / 8;
57 ph->nexthdr = top_iph->protocol;
58
59 top_iph->protocol = IPPROTO_BEETPH;
60 top_iph->ihl = sizeof(struct iphdr) / 4;
61 }
62
63 top_iph->saddr = x->props.saddr.a4;
64 top_iph->daddr = x->id.daddr.a4;
65
66 return 0;
67}
68
69static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
70{
71 struct iphdr *iph = skb->nh.iph;
72 int phlen = 0;
73 int optlen = 0;
74 __u8 ph_nexthdr = 0, protocol = 0;
75 int err = -EINVAL;
76
77 protocol = iph->protocol;
78
79 if (unlikely(iph->protocol == IPPROTO_BEETPH)) {
80 struct ip_beet_phdr *ph = (struct ip_beet_phdr*)(iph + 1);
81
82 if (!pskb_may_pull(skb, sizeof(*ph)))
83 goto out;
84
85 phlen = ph->hdrlen * 8;
86 optlen = phlen - ph->padlen - sizeof(*ph);
87 if (optlen < 0 || optlen & 3 || optlen > 250)
88 goto out;
89
90 if (!pskb_may_pull(skb, phlen))
91 goto out;
92
93 ph_nexthdr = ph->nexthdr;
94 }
95
96 skb_push(skb, sizeof(*iph) - phlen + optlen);
97 memmove(skb->data, skb->nh.raw, sizeof(*iph));
98 skb->nh.raw = skb->data;
99
100 iph = skb->nh.iph;
101 iph->ihl = (sizeof(*iph) + optlen) / 4;
102 iph->tot_len = htons(skb->len);
103 iph->daddr = x->sel.daddr.a4;
104 iph->saddr = x->sel.saddr.a4;
105 if (ph_nexthdr)
106 iph->protocol = ph_nexthdr;
107 else
108 iph->protocol = protocol;
109 iph->check = 0;
110 iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
111 err = 0;
112out:
113 return err;
114}
115
116static struct xfrm_mode xfrm4_beet_mode = {
117 .input = xfrm4_beet_input,
118 .output = xfrm4_beet_output,
119 .owner = THIS_MODULE,
120 .encap = XFRM_MODE_BEET,
121};
122
123static int __init xfrm4_beet_init(void)
124{
125 return xfrm_register_mode(&xfrm4_beet_mode, AF_INET);
126}
127
128static void __exit xfrm4_beet_exit(void)
129{
130 int err;
131
132 err = xfrm_unregister_mode(&xfrm4_beet_mode, AF_INET);
133 BUG_ON(err);
134}
135
136module_init(xfrm4_beet_init);
137module_exit(xfrm4_beet_exit);
138MODULE_LICENSE("GPL");
139MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_BEET);
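
[Note on the new xfrm4_mode_beet.c above] When IPv4 options are present, xfrm4_beet_output() carries them across the BEET SA behind a small IPPROTO_BEETPH pseudo-header whose hdrlen field counts 8-byte units. The sketch below just re-runs the patch's length arithmetic in userspace, assuming the 4-byte pseudo-header layout (nexthdr, hdrlen, padlen, one reserved byte), so the padding and the division by 8 can be checked by eye; it mirrors the formulas in the code above rather than the BEET draft text, and the pseudo-header is only inserted when optlen is non-zero.

/*
 * Re-run the pseudo-header length arithmetic from xfrm4_beet_output().
 * Assumes a 4-byte beet pseudo-header; hdrlen is in 8-byte units.
 */
#include <stdio.h>

struct beet_phdr {		/* stand-in for struct ip_beet_phdr */
	unsigned char nexthdr;
	unsigned char hdrlen;	/* phdr + pad + options, in 8-byte units */
	unsigned char padlen;
	unsigned char reserved;
};

int main(void)
{
	/* IPv4 options occupy 4..40 bytes in 4-byte steps (ihl is 6..15). */
	for (int optlen = 4; optlen <= 40; optlen += 4) {
		int padlen = 4 - (optlen & 4);	/* as computed in the patch */
		int hdrlen = (optlen + padlen + (int)sizeof(struct beet_phdr)) / 8;

		printf("optlen %2d -> padlen %d, hdrlen %d (%2d bytes on the wire)\n",
		       optlen, padlen, hdrlen, hdrlen * 8);
	}
	return 0;
}
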
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index eabcd27b1767..1bed0cdf53e3 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -52,7 +52,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
52 xdst->u.rt.fl.fl4_dst == fl->fl4_dst && 52 xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
53 xdst->u.rt.fl.fl4_src == fl->fl4_src && 53 xdst->u.rt.fl.fl4_src == fl->fl4_src &&
54 xdst->u.rt.fl.fl4_tos == fl->fl4_tos && 54 xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
55 xfrm_bundle_ok(xdst, fl, AF_INET, 0)) { 55 xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
56 dst_clone(dst); 56 dst_clone(dst);
57 break; 57 break;
58 } 58 }
@@ -221,7 +221,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
221 221
222 case IPPROTO_ESP: 222 case IPPROTO_ESP:
223 if (pskb_may_pull(skb, xprth + 4 - skb->data)) { 223 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
224 u32 *ehdr = (u32 *)xprth; 224 __be32 *ehdr = (__be32 *)xprth;
225 225
226 fl->fl_ipsec_spi = ehdr[0]; 226 fl->fl_ipsec_spi = ehdr[0];
227 } 227 }
@@ -229,7 +229,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
229 229
230 case IPPROTO_AH: 230 case IPPROTO_AH:
231 if (pskb_may_pull(skb, xprth + 8 - skb->data)) { 231 if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
232 u32 *ah_hdr = (u32*)xprth; 232 __be32 *ah_hdr = (__be32*)xprth;
233 233
234 fl->fl_ipsec_spi = ah_hdr[1]; 234 fl->fl_ipsec_spi = ah_hdr[1];
235 } 235 }
@@ -237,7 +237,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
237 237
238 case IPPROTO_COMP: 238 case IPPROTO_COMP:
239 if (pskb_may_pull(skb, xprth + 4 - skb->data)) { 239 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
240 u16 *ipcomp_hdr = (u16 *)xprth; 240 __be16 *ipcomp_hdr = (__be16 *)xprth;
241 241
242 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); 242 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
243 } 243 }
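
[Note on the _decode_session4() hunks above] The SPI fields are now read through __be32/__be16 pointers: for ESP the SPI is the first 32-bit word of the header, for AH the second, and IPComp only carries a 16-bit CPI, which the code widens into SPI position with htonl(ntohs(cpi)). A small userspace sketch of that last conversion, on made-up header bytes, is below.

/*
 * The IPComp case turns a 16-bit CPI into a 32-bit SPI-shaped value
 * with htonl(ntohs(cpi)).  Made-up bytes; standard socket headers only.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char ipcomp_hdr[4] = { 0x04, 0x00, 0x12, 0x34 };	/* nexthdr, flags, CPI */
	uint16_t cpi;
	uint32_t spi;

	memcpy(&cpi, ipcomp_hdr + 2, sizeof(cpi));	/* __be16 CPI straight off the wire */
	spi = htonl(ntohs(cpi));			/* same widening as the patch */

	printf("CPI on the wire: 0x%04x, flow SPI: 0x%08x\n",
	       (unsigned)ntohs(cpi), (unsigned)ntohl(spi));
	return 0;
}
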
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index fe2034494d08..3cc3df0c6ece 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -29,9 +29,9 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
29 x->sel.daddr.a4 = fl->fl4_dst; 29 x->sel.daddr.a4 = fl->fl4_dst;
30 x->sel.saddr.a4 = fl->fl4_src; 30 x->sel.saddr.a4 = fl->fl4_src;
31 x->sel.dport = xfrm_flowi_dport(fl); 31 x->sel.dport = xfrm_flowi_dport(fl);
32 x->sel.dport_mask = ~0; 32 x->sel.dport_mask = htons(0xffff);
33 x->sel.sport = xfrm_flowi_sport(fl); 33 x->sel.sport = xfrm_flowi_sport(fl);
34 x->sel.sport_mask = ~0; 34 x->sel.sport_mask = htons(0xffff);
35 x->sel.prefixlen_d = 32; 35 x->sel.prefixlen_d = 32;
36 x->sel.prefixlen_s = 32; 36 x->sel.prefixlen_s = 32;
37 x->sel.proto = fl->proto; 37 x->sel.proto = fl->proto;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index a2d211da2aba..6e48f52e197c 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -136,6 +136,16 @@ config INET6_XFRM_MODE_TUNNEL
136 136
137 If unsure, say Y. 137 If unsure, say Y.
138 138
139config INET6_XFRM_MODE_BEET
140 tristate "IPv6: IPsec BEET mode"
141 depends on IPV6
142 default IPV6
143 select XFRM
144 ---help---
145 Support for IPsec BEET mode.
146
147 If unsure, say Y.
148
139config INET6_XFRM_MODE_ROUTEOPTIMIZATION 149config INET6_XFRM_MODE_ROUTEOPTIMIZATION
140 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)" 150 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)"
141 depends on IPV6 && EXPERIMENTAL 151 depends on IPV6 && EXPERIMENTAL
@@ -143,6 +153,19 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
143 ---help--- 153 ---help---
144 Support for MIPv6 route optimization mode. 154 Support for MIPv6 route optimization mode.
145 155
156config IPV6_SIT
157 tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
158 depends on IPV6
159 default y
160 ---help---
161 Tunneling means encapsulating data of one protocol type within
162 another protocol and sending it over a channel that understands the
163 encapsulating protocol. This driver implements encapsulation of IPv6
164 into IPv4 packets. This is useful if you want to connect two IPv6
165 networks over an IPv4-only path.
166
167 Saying M here will produce a module called sit.ko. If unsure, say Y.
168
146config IPV6_TUNNEL 169config IPV6_TUNNEL
147 tristate "IPv6: IPv6-in-IPv6 tunnel" 170 tristate "IPv6: IPv6-in-IPv6 tunnel"
148 select INET6_TUNNEL 171 select INET6_TUNNEL
@@ -152,9 +175,16 @@ config IPV6_TUNNEL
152 175
153 If unsure, say N. 176 If unsure, say N.
154 177
178config IPV6_MULTIPLE_TABLES
179 bool "IPv6: Multiple Routing Tables"
180 depends on IPV6 && EXPERIMENTAL
181 select FIB_RULES
182 ---help---
183 Support multiple routing tables.
184
155config IPV6_SUBTREES 185config IPV6_SUBTREES
156 bool "IPv6: source address based routing" 186 bool "IPv6: source address based routing"
157 depends on IPV6 && EXPERIMENTAL 187 depends on IPV6_MULTIPLE_TABLES
158 ---help--- 188 ---help---
159 Enable routing by source address or prefix. 189 Enable routing by source address or prefix.
160 190
@@ -166,13 +196,6 @@ config IPV6_SUBTREES
166 196
167 If unsure, say N. 197 If unsure, say N.
168 198
169config IPV6_MULTIPLE_TABLES
170 bool "IPv6: Multiple Routing Tables"
171 depends on IPV6 && EXPERIMENTAL
172 select FIB_RULES
173 ---help---
174 Support multiple routing tables.
175
176config IPV6_ROUTE_FWMARK 199config IPV6_ROUTE_FWMARK
177 bool "IPv6: use netfilter MARK value as routing key" 200 bool "IPv6: use netfilter MARK value as routing key"
178 depends on IPV6_MULTIPLE_TABLES && NETFILTER 201 depends on IPV6_MULTIPLE_TABLES && NETFILTER
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 0213c6612b58..addcc011bc01 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_IPV6) += ipv6.o 5obj-$(CONFIG_IPV6) += ipv6.o
6 6
7ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \ 7ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ 8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ 10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
@@ -26,8 +26,10 @@ obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o
26obj-$(CONFIG_INET6_XFRM_MODE_TRANSPORT) += xfrm6_mode_transport.o 26obj-$(CONFIG_INET6_XFRM_MODE_TRANSPORT) += xfrm6_mode_transport.o
27obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o 27obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
28obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o 28obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
29obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
29obj-$(CONFIG_NETFILTER) += netfilter/ 30obj-$(CONFIG_NETFILTER) += netfilter/
30 31
32obj-$(CONFIG_IPV6_SIT) += sit.o
31obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 33obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
32 34
33obj-y += exthdrs_core.o 35obj-y += exthdrs_core.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c18676352397..b312a5f7a759 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -396,8 +396,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
396 ndev->regen_timer.data = (unsigned long) ndev; 396 ndev->regen_timer.data = (unsigned long) ndev;
397 if ((dev->flags&IFF_LOOPBACK) || 397 if ((dev->flags&IFF_LOOPBACK) ||
398 dev->type == ARPHRD_TUNNEL || 398 dev->type == ARPHRD_TUNNEL ||
399 dev->type == ARPHRD_NONE || 399#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
400 dev->type == ARPHRD_SIT) { 400 dev->type == ARPHRD_SIT ||
401#endif
402 dev->type == ARPHRD_NONE) {
401 printk(KERN_INFO 403 printk(KERN_INFO
402 "%s: Disabled Privacy Extensions\n", 404 "%s: Disabled Privacy Extensions\n",
403 dev->name); 405 dev->name);
@@ -1258,8 +1260,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
1258{ 1260{
1259 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 1261 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
1260 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); 1262 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
1261 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; 1263 __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
1262 u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 1264 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
1263 int sk_ipv6only = ipv6_only_sock(sk); 1265 int sk_ipv6only = ipv6_only_sock(sk);
1264 int sk2_ipv6only = inet_v6_ipv6only(sk2); 1266 int sk2_ipv6only = inet_v6_ipv6only(sk2);
1265 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 1267 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -1546,8 +1548,10 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
1546 This thing is done here expecting that the whole 1548 This thing is done here expecting that the whole
1547 class of non-broadcast devices need not cloning. 1549 class of non-broadcast devices need not cloning.
1548 */ 1550 */
1551#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
1549 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 1552 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
1550 cfg.fc_flags |= RTF_NONEXTHOP; 1553 cfg.fc_flags |= RTF_NONEXTHOP;
1554#endif
1551 1555
1552 ip6_route_add(&cfg); 1556 ip6_route_add(&cfg);
1553} 1557}
@@ -1569,6 +1573,7 @@ static void addrconf_add_mroute(struct net_device *dev)
1569 ip6_route_add(&cfg); 1573 ip6_route_add(&cfg);
1570} 1574}
1571 1575
1576#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
1572static void sit_route_add(struct net_device *dev) 1577static void sit_route_add(struct net_device *dev)
1573{ 1578{
1574 struct fib6_config cfg = { 1579 struct fib6_config cfg = {
@@ -1582,6 +1587,7 @@ static void sit_route_add(struct net_device *dev)
1582 /* prefix length - 96 bits "::d.d.d.d" */ 1587 /* prefix length - 96 bits "::d.d.d.d" */
1583 ip6_route_add(&cfg); 1588 ip6_route_add(&cfg);
1584} 1589}
1590#endif
1585 1591
1586static void addrconf_add_lroute(struct net_device *dev) 1592static void addrconf_add_lroute(struct net_device *dev)
1587{ 1593{
@@ -1852,6 +1858,7 @@ int addrconf_set_dstaddr(void __user *arg)
1852 if (dev == NULL) 1858 if (dev == NULL)
1853 goto err_exit; 1859 goto err_exit;
1854 1860
1861#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
1855 if (dev->type == ARPHRD_SIT) { 1862 if (dev->type == ARPHRD_SIT) {
1856 struct ifreq ifr; 1863 struct ifreq ifr;
1857 mm_segment_t oldfs; 1864 mm_segment_t oldfs;
@@ -1881,6 +1888,7 @@ int addrconf_set_dstaddr(void __user *arg)
1881 err = dev_open(dev); 1888 err = dev_open(dev);
1882 } 1889 }
1883 } 1890 }
1891#endif
1884 1892
1885err_exit: 1893err_exit:
1886 rtnl_unlock(); 1894 rtnl_unlock();
@@ -2010,6 +2018,7 @@ int addrconf_del_ifaddr(void __user *arg)
2010 return err; 2018 return err;
2011} 2019}
2012 2020
2021#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2013static void sit_add_v4_addrs(struct inet6_dev *idev) 2022static void sit_add_v4_addrs(struct inet6_dev *idev)
2014{ 2023{
2015 struct inet6_ifaddr * ifp; 2024 struct inet6_ifaddr * ifp;
@@ -2078,6 +2087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2078 } 2087 }
2079 } 2088 }
2080} 2089}
2090#endif
2081 2091
2082static void init_loopback(struct net_device *dev) 2092static void init_loopback(struct net_device *dev)
2083{ 2093{
@@ -2141,6 +2151,7 @@ static void addrconf_dev_config(struct net_device *dev)
2141 addrconf_add_linklocal(idev, &addr); 2151 addrconf_add_linklocal(idev, &addr);
2142} 2152}
2143 2153
2154#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2144static void addrconf_sit_config(struct net_device *dev) 2155static void addrconf_sit_config(struct net_device *dev)
2145{ 2156{
2146 struct inet6_dev *idev; 2157 struct inet6_dev *idev;
@@ -2166,6 +2177,7 @@ static void addrconf_sit_config(struct net_device *dev)
2166 } else 2177 } else
2167 sit_route_add(dev); 2178 sit_route_add(dev);
2168} 2179}
2180#endif
2169 2181
2170static inline int 2182static inline int
2171ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) 2183ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
@@ -2260,9 +2272,11 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2260 } 2272 }
2261 2273
2262 switch(dev->type) { 2274 switch(dev->type) {
2275#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2263 case ARPHRD_SIT: 2276 case ARPHRD_SIT:
2264 addrconf_sit_config(dev); 2277 addrconf_sit_config(dev);
2265 break; 2278 break;
2279#endif
2266 case ARPHRD_TUNNEL6: 2280 case ARPHRD_TUNNEL6:
2267 addrconf_ip6_tnl_config(dev); 2281 addrconf_ip6_tnl_config(dev);
2268 break; 2282 break;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bf6e8aff19d4..858cae29581c 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -246,7 +246,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
246 struct sock *sk = sock->sk; 246 struct sock *sk = sock->sk;
247 struct inet_sock *inet = inet_sk(sk); 247 struct inet_sock *inet = inet_sk(sk);
248 struct ipv6_pinfo *np = inet6_sk(sk); 248 struct ipv6_pinfo *np = inet6_sk(sk);
249 __u32 v4addr = 0; 249 __be32 v4addr = 0;
250 unsigned short snum; 250 unsigned short snum;
251 int addr_type = 0; 251 int addr_type = 0;
252 int err = 0; 252 int err = 0;
@@ -850,7 +850,6 @@ static int __init inet6_init(void)
850 err = addrconf_init(); 850 err = addrconf_init();
851 if (err) 851 if (err)
852 goto addrconf_fail; 852 goto addrconf_fail;
853 sit_init();
854 853
855 /* Init v6 extension headers. */ 854 /* Init v6 extension headers. */
856 ipv6_rthdr_init(); 855 ipv6_rthdr_init();
@@ -927,7 +926,6 @@ static void __exit inet6_exit(void)
927 mip6_fini(); 926 mip6_fini();
928#endif 927#endif
929 /* Cleanup code parts. */ 928 /* Cleanup code parts. */
930 sit_cleanup();
931 ip6_flowlabel_cleanup(); 929 ip6_flowlabel_cleanup();
932 addrconf_cleanup(); 930 addrconf_cleanup();
933 ip6_route_cleanup(); 931 ip6_route_cleanup();
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 34f5bfaddfc2..1896ecb52899 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -13,7 +13,6 @@
13 * Ville Nuorvala <vnuorval@tcs.hut.fi> 13 * Ville Nuorvala <vnuorval@tcs.hut.fi>
14 */ 14 */
15 15
16#include <linux/config.h>
17#include <linux/netdevice.h> 16#include <linux/netdevice.h>
18 17
19#include <net/fib_rules.h> 18#include <net/fib_rules.h>
@@ -118,12 +117,15 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
118{ 117{
119 struct fib6_rule *r = (struct fib6_rule *) rule; 118 struct fib6_rule *r = (struct fib6_rule *) rule;
120 119
121 if (!ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen)) 120 if (r->dst.plen &&
121 !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen))
122 return 0; 122 return 0;
123 123
124 if ((flags & RT6_LOOKUP_F_HAS_SADDR) && 124 if (r->src.plen) {
125 !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen)) 125 if (!(flags & RT6_LOOKUP_F_HAS_SADDR) ||
126 return 0; 126 !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen))
127 return 0;
128 }
127 129
128 if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff)) 130 if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff))
129 return 0; 131 return 0;
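
[Note on the fib6_rule_match() change above] A zero prefix length now means "don't care": the destination prefix is only compared when r->dst.plen is set, and a source prefix can only match when the lookup actually supplied a source address (RT6_LOOKUP_F_HAS_SADDR). The sketch below restates that decision table in userspace with booleans in place of the real prefix comparisons; it is toy code, not the kernel helpers.

/*
 * Decision logic of the reworked fib6_rule_match(), with booleans
 * standing in for ipv6_prefix_equal() results.
 */
#include <stdio.h>
#include <stdbool.h>

static bool rule_matches(int dst_plen, bool dst_prefix_equal,
			 int src_plen, bool have_saddr, bool src_prefix_equal)
{
	if (dst_plen && !dst_prefix_equal)
		return false;
	if (src_plen) {
		if (!have_saddr || !src_prefix_equal)
			return false;
	}
	return true;
}

int main(void)
{
	/* dst rule only, destination matches */
	printf("%d\n", rule_matches(64, true,  0, false, false));	/* 1 */
	/* src rule set, but the lookup carried no source address */
	printf("%d\n", rule_matches(0,  false, 48, false, true));	/* 0 */
	/* both prefixes set and matching, source present */
	printf("%d\n", rule_matches(64, true,  48, true,  true));	/* 1 */
	return 0;
}
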
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index d2f3fc990bfa..8accd1fbeeda 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -64,7 +64,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
64{ 64{
65 struct sock *sk; 65 struct sock *sk;
66 const struct hlist_node *node; 66 const struct hlist_node *node;
67 const __u32 ports = INET_COMBINED_PORTS(sport, hnum); 67 const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
68 /* Optimize here for direct hit, only listening connections can 68 /* Optimize here for direct hit, only listening connections can
69 * have wildcards anyways. 69 * have wildcards anyways.
70 */ 70 */
@@ -82,7 +82,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
82 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 82 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
83 const struct inet_timewait_sock *tw = inet_twsk(sk); 83 const struct inet_timewait_sock *tw = inet_twsk(sk);
84 84
85 if(*((__u32 *)&(tw->tw_dport)) == ports && 85 if(*((__portpair *)&(tw->tw_dport)) == ports &&
86 sk->sk_family == PF_INET6) { 86 sk->sk_family == PF_INET6) {
87 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 87 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
88 88
@@ -171,7 +171,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
171 const struct in6_addr *daddr = &np->rcv_saddr; 171 const struct in6_addr *daddr = &np->rcv_saddr;
172 const struct in6_addr *saddr = &np->daddr; 172 const struct in6_addr *saddr = &np->daddr;
173 const int dif = sk->sk_bound_dev_if; 173 const int dif = sk->sk_bound_dev_if;
174 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 174 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
175 const unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, 175 const unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr,
176 inet->dport); 176 inet->dport);
177 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 177 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
@@ -188,7 +188,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
188 188
189 tw = inet_twsk(sk2); 189 tw = inet_twsk(sk2);
190 190
191 if(*((__u32 *)&(tw->tw_dport)) == ports && 191 if(*((__portpair *)&(tw->tw_dport)) == ports &&
192 sk2->sk_family == PF_INET6 && 192 sk2->sk_family == PF_INET6 &&
193 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) && 193 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
194 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) && 194 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
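
[Note on the inet6_hashtables.c hunks above] Only the type of the combined (dport, local port) pair changes, from __u32 to __portpair; the underlying idea stays the same: pack both 16-bit ports into one 32-bit word so an established-hash entry can be checked with a single compare. The sketch below shows that packing idea generically; it is not the kernel's INET_COMBINED_PORTS macro, whose exact layout is endian-dependent.

/*
 * Generic illustration of packing two 16-bit ports into one 32-bit key
 * so a socket-lookup comparison is a single operation.  Not the
 * kernel's INET_COMBINED_PORTS layout.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t combine_ports(uint16_t dport, uint16_t local_port)
{
	return ((uint32_t)dport << 16) | local_port;
}

int main(void)
{
	uint32_t key   = combine_ports(40000, 443);
	uint32_t probe = combine_ports(40000, 443);
	uint32_t other = combine_ports(40001, 443);

	printf("key == probe: %d\n", key == probe);	/* 1: one compare checks both ports */
	printf("key == other: %d\n", key == other);	/* 0 */
	return 0;
}
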
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index ad9c6e824e62..71f59f18ede8 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -178,7 +178,7 @@ out_ok:
178static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 178static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
179 int type, int code, int offset, __u32 info) 179 int type, int code, int offset, __u32 info)
180{ 180{
181 u32 spi; 181 __be32 spi;
182 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 182 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
183 struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset); 183 struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset);
184 struct xfrm_state *x; 184 struct xfrm_state *x;
@@ -199,6 +199,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
199static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 199static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
200{ 200{
201 struct xfrm_state *t = NULL; 201 struct xfrm_state *t = NULL;
202 u8 mode = XFRM_MODE_TUNNEL;
202 203
203 t = xfrm_state_alloc(); 204 t = xfrm_state_alloc();
204 if (!t) 205 if (!t)
@@ -212,7 +213,9 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
212 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr)); 213 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr));
213 memcpy(&t->sel, &x->sel, sizeof(t->sel)); 214 memcpy(&t->sel, &x->sel, sizeof(t->sel));
214 t->props.family = AF_INET6; 215 t->props.family = AF_INET6;
215 t->props.mode = XFRM_MODE_TUNNEL; 216 if (x->props.mode == XFRM_MODE_BEET)
217 mode = x->props.mode;
218 t->props.mode = mode;
216 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); 219 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
217 220
218 if (xfrm_init_state(t)) 221 if (xfrm_init_state(t))
@@ -234,7 +237,7 @@ static int ipcomp6_tunnel_attach(struct xfrm_state *x)
234{ 237{
235 int err = 0; 238 int err = 0;
236 struct xfrm_state *t = NULL; 239 struct xfrm_state *t = NULL;
237 u32 spi; 240 __be32 spi;
238 241
239 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr); 242 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr);
240 if (spi) 243 if (spi)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4f3bb7fcc8b5..de6b91981b30 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -123,6 +123,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
123 struct ipv6hdr *ipv6h; 123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops; 124 struct inet6_protocol *ops;
125 125
126 if (!(features & NETIF_F_HW_CSUM))
127 features &= ~NETIF_F_SG;
128
126 if (unlikely(skb_shinfo(skb)->gso_type & 129 if (unlikely(skb_shinfo(skb)->gso_type &
127 ~(SKB_GSO_UDP | 130 ~(SKB_GSO_UDP |
128 SKB_GSO_DODGY | 131 SKB_GSO_DODGY |
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 99d116caecda..7ccdc8fc5a31 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -22,7 +22,6 @@
22 * Masahide NAKAMURA @USAGI 22 * Masahide NAKAMURA @USAGI
23 */ 23 */
24 24
25#include <linux/config.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/skbuff.h> 26#include <linux/skbuff.h>
28#include <linux/time.h> 27#include <linux/time.h>
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0304b5fe8d6a..41a8a5f06602 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -967,8 +967,6 @@ static void ndisc_recv_na(struct sk_buff *skb)
967 ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && 967 ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp &&
968 pneigh_lookup(&nd_tbl, &msg->target, dev, 0)) { 968 pneigh_lookup(&nd_tbl, &msg->target, dev, 0)) {
969 /* XXX: idev->cnf.prixy_ndp */ 969 /* XXX: idev->cnf.prixy_ndp */
970 WARN_ON(skb->dst != NULL &&
971 ((struct rt6_info *)skb->dst)->rt6i_idev);
972 goto out; 970 goto out;
973 } 971 }
974 972
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d6b4b4f48d18..c953466b7afd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -141,6 +141,10 @@ struct rt6_info ip6_null_entry = {
141 141
142#ifdef CONFIG_IPV6_MULTIPLE_TABLES 142#ifdef CONFIG_IPV6_MULTIPLE_TABLES
143 143
144static int ip6_pkt_prohibit(struct sk_buff *skb);
145static int ip6_pkt_prohibit_out(struct sk_buff *skb);
146static int ip6_pkt_blk_hole(struct sk_buff *skb);
147
144struct rt6_info ip6_prohibit_entry = { 148struct rt6_info ip6_prohibit_entry = {
145 .u = { 149 .u = {
146 .dst = { 150 .dst = {
@@ -150,8 +154,8 @@ struct rt6_info ip6_prohibit_entry = {
150 .obsolete = -1, 154 .obsolete = -1,
151 .error = -EACCES, 155 .error = -EACCES,
152 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 156 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
153 .input = ip6_pkt_discard, 157 .input = ip6_pkt_prohibit,
154 .output = ip6_pkt_discard_out, 158 .output = ip6_pkt_prohibit_out,
155 .ops = &ip6_dst_ops, 159 .ops = &ip6_dst_ops,
156 .path = (struct dst_entry*)&ip6_prohibit_entry, 160 .path = (struct dst_entry*)&ip6_prohibit_entry,
157 } 161 }
@@ -170,8 +174,8 @@ struct rt6_info ip6_blk_hole_entry = {
170 .obsolete = -1, 174 .obsolete = -1,
171 .error = -EINVAL, 175 .error = -EINVAL,
172 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
173 .input = ip6_pkt_discard, 177 .input = ip6_pkt_blk_hole,
174 .output = ip6_pkt_discard_out, 178 .output = ip6_pkt_blk_hole,
175 .ops = &ip6_dst_ops, 179 .ops = &ip6_dst_ops,
176 .path = (struct dst_entry*)&ip6_blk_hole_entry, 180 .path = (struct dst_entry*)&ip6_blk_hole_entry,
177 } 181 }
@@ -484,7 +488,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
484do { \ 488do { \
485 if (rt == &ip6_null_entry) { \ 489 if (rt == &ip6_null_entry) { \
486 struct fib6_node *pn; \ 490 struct fib6_node *pn; \
487 while (fn) { \ 491 while (1) { \
488 if (fn->fn_flags & RTN_TL_ROOT) \ 492 if (fn->fn_flags & RTN_TL_ROOT) \
489 goto out; \ 493 goto out; \
490 pn = fn->parent; \ 494 pn = fn->parent; \
@@ -529,13 +533,17 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
529 .nl_u = { 533 .nl_u = {
530 .ip6_u = { 534 .ip6_u = {
531 .daddr = *daddr, 535 .daddr = *daddr,
532 /* TODO: saddr */
533 }, 536 },
534 }, 537 },
535 }; 538 };
536 struct dst_entry *dst; 539 struct dst_entry *dst;
537 int flags = strict ? RT6_LOOKUP_F_IFACE : 0; 540 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
538 541
542 if (saddr) {
543 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
544 flags |= RT6_LOOKUP_F_HAS_SADDR;
545 }
546
539 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup); 547 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
540 if (dst->error == 0) 548 if (dst->error == 0)
541 return (struct rt6_info *) dst; 549 return (struct rt6_info *) dst;
@@ -614,8 +622,6 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
614 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); 622 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
615 rt->rt6i_dst.plen = 128; 623 rt->rt6i_dst.plen = 128;
616 rt->rt6i_flags |= RTF_CACHE; 624 rt->rt6i_flags |= RTF_CACHE;
617 if (rt->rt6i_flags & RTF_REJECT)
618 rt->u.dst.error = ort->u.dst.error;
619 rt->u.dst.flags |= DST_HOST; 625 rt->u.dst.flags |= DST_HOST;
620 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); 626 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
621 } 627 }
@@ -697,6 +703,7 @@ out2:
697void ip6_route_input(struct sk_buff *skb) 703void ip6_route_input(struct sk_buff *skb)
698{ 704{
699 struct ipv6hdr *iph = skb->nh.ipv6h; 705 struct ipv6hdr *iph = skb->nh.ipv6h;
706 int flags = RT6_LOOKUP_F_HAS_SADDR;
700 struct flowi fl = { 707 struct flowi fl = {
701 .iif = skb->dev->ifindex, 708 .iif = skb->dev->ifindex,
702 .nl_u = { 709 .nl_u = {
@@ -711,7 +718,9 @@ void ip6_route_input(struct sk_buff *skb)
711 }, 718 },
712 .proto = iph->nexthdr, 719 .proto = iph->nexthdr,
713 }; 720 };
714 int flags = rt6_need_strict(&iph->daddr) ? RT6_LOOKUP_F_IFACE : 0; 721
722 if (rt6_need_strict(&iph->daddr))
723 flags |= RT6_LOOKUP_F_IFACE;
715 724
716 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input); 725 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
717} 726}
@@ -794,6 +803,9 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
794 if (rt6_need_strict(&fl->fl6_dst)) 803 if (rt6_need_strict(&fl->fl6_dst))
795 flags |= RT6_LOOKUP_F_IFACE; 804 flags |= RT6_LOOKUP_F_IFACE;
796 805
806 if (!ipv6_addr_any(&fl->fl6_src))
807 flags |= RT6_LOOKUP_F_HAS_SADDR;
808
797 return fib6_rule_lookup(fl, flags, ip6_pol_route_output); 809 return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
798} 810}
799 811
@@ -1345,6 +1357,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1345 struct in6_addr *gateway, 1357 struct in6_addr *gateway,
1346 struct net_device *dev) 1358 struct net_device *dev)
1347{ 1359{
1360 int flags = RT6_LOOKUP_F_HAS_SADDR;
1348 struct ip6rd_flowi rdfl = { 1361 struct ip6rd_flowi rdfl = {
1349 .fl = { 1362 .fl = {
1350 .oif = dev->ifindex, 1363 .oif = dev->ifindex,
@@ -1357,7 +1370,9 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1357 }, 1370 },
1358 .gateway = *gateway, 1371 .gateway = *gateway,
1359 }; 1372 };
1360 int flags = rt6_need_strict(dest) ? RT6_LOOKUP_F_IFACE : 0; 1373
1374 if (rt6_need_strict(dest))
1375 flags |= RT6_LOOKUP_F_IFACE;
1361 1376
1362 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect); 1377 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect);
1363} 1378}
@@ -1527,6 +1542,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1527 rt->u.dst.output = ort->u.dst.output; 1542 rt->u.dst.output = ort->u.dst.output;
1528 1543
1529 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); 1544 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1545 rt->u.dst.error = ort->u.dst.error;
1530 rt->u.dst.dev = ort->u.dst.dev; 1546 rt->u.dst.dev = ort->u.dst.dev;
1531 if (rt->u.dst.dev) 1547 if (rt->u.dst.dev)
1532 dev_hold(rt->u.dst.dev); 1548 dev_hold(rt->u.dst.dev);
@@ -1730,24 +1746,50 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1730 * Drop the packet on the floor 1746 * Drop the packet on the floor
1731 */ 1747 */
1732 1748
1733static int ip6_pkt_discard(struct sk_buff *skb) 1749static inline int ip6_pkt_drop(struct sk_buff *skb, int code)
1734{ 1750{
1735 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr); 1751 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
1736 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) 1752 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
1737 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS); 1753 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
1738 1754
1739 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); 1755 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
1740 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev); 1756 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1741 kfree_skb(skb); 1757 kfree_skb(skb);
1742 return 0; 1758 return 0;
1743} 1759}
1744 1760
1761static int ip6_pkt_discard(struct sk_buff *skb)
1762{
1763 return ip6_pkt_drop(skb, ICMPV6_NOROUTE);
1764}
1765
1745static int ip6_pkt_discard_out(struct sk_buff *skb) 1766static int ip6_pkt_discard_out(struct sk_buff *skb)
1746{ 1767{
1747 skb->dev = skb->dst->dev; 1768 skb->dev = skb->dst->dev;
1748 return ip6_pkt_discard(skb); 1769 return ip6_pkt_discard(skb);
1749} 1770}
1750 1771
1772#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1773
1774static int ip6_pkt_prohibit(struct sk_buff *skb)
1775{
1776 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED);
1777}
1778
1779static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1780{
1781 skb->dev = skb->dst->dev;
1782 return ip6_pkt_prohibit(skb);
1783}
1784
1785static int ip6_pkt_blk_hole(struct sk_buff *skb)
1786{
1787 kfree_skb(skb);
1788 return 0;
1789}
1790
1791#endif
1792
1751/* 1793/*
1752 * Allocate a dst for local (unicast / anycast) address. 1794 * Allocate a dst for local (unicast / anycast) address.
1753 */ 1795 */
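
[Note on the route.c drop helpers above] With multiple routing tables, the prohibit and blackhole entries stop reusing ip6_pkt_discard(): ip6_pkt_drop() takes the ICMPv6 code to send, so an unreachable route answers ICMPV6_NOROUTE, a prohibit route answers ICMPV6_ADM_PROHIBITED, and a blackhole frees the packet silently. The userspace restatement below is illustrative only; the enum and strings are invented, and just the ICMPv6 code names mirror the patch.

/*
 * Behaviour table of ip6_pkt_discard() / ip6_pkt_prohibit() /
 * ip6_pkt_blk_hole(), restated in userspace.
 */
#include <stdio.h>

enum route_kind { ROUTE_UNREACHABLE, ROUTE_PROHIBIT, ROUTE_BLACKHOLE };

static const char *drop_action(enum route_kind kind)
{
	switch (kind) {
	case ROUTE_UNREACHABLE:
		return "drop + ICMPv6 dest-unreachable, code ICMPV6_NOROUTE";
	case ROUTE_PROHIBIT:
		return "drop + ICMPv6 dest-unreachable, code ICMPV6_ADM_PROHIBITED";
	case ROUTE_BLACKHOLE:
		return "drop silently (kfree_skb only)";
	}
	return "unknown";
}

int main(void)
{
	printf("unreachable: %s\n", drop_action(ROUTE_UNREACHABLE));
	printf("prohibit:    %s\n", drop_action(ROUTE_PROHIBIT));
	printf("blackhole:   %s\n", drop_action(ROUTE_BLACKHOLE));
	return 0;
}
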
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 836eecd7e62b..b481a4d780c2 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -850,3 +850,7 @@ int __init sit_init(void)
850 inet_del_protocol(&sit_protocol, IPPROTO_IPV6); 850 inet_del_protocol(&sit_protocol, IPPROTO_IPV6);
851 goto out; 851 goto out;
852} 852}
853
854module_init(sit_init);
855module_exit(sit_cleanup);
856MODULE_LICENSE("GPL");
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2546fc9f0a78..4c2a7c0cafef 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -329,7 +329,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
329 } 329 }
330 330
331 if (sk->sk_state == TCP_TIME_WAIT) { 331 if (sk->sk_state == TCP_TIME_WAIT) {
332 inet_twsk_put((struct inet_timewait_sock *)sk); 332 inet_twsk_put(inet_twsk(sk));
333 return; 333 return;
334 } 334 }
335 335
@@ -653,7 +653,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
653 int tot_len = sizeof(struct tcphdr); 653 int tot_len = sizeof(struct tcphdr);
654 654
655 if (ts) 655 if (ts)
656 tot_len += 3*4; 656 tot_len += TCPOLEN_TSTAMP_ALIGNED;
657 657
658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
659 GFP_ATOMIC); 659 GFP_ATOMIC);
@@ -749,7 +749,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
749 bh_lock_sock(nsk); 749 bh_lock_sock(nsk);
750 return nsk; 750 return nsk;
751 } 751 }
752 inet_twsk_put((struct inet_timewait_sock *)nsk); 752 inet_twsk_put(inet_twsk(nsk));
753 return NULL; 753 return NULL;
754 } 754 }
755 755
@@ -1237,7 +1237,7 @@ process:
1237 1237
1238 skb->dev = NULL; 1238 skb->dev = NULL;
1239 1239
1240 bh_lock_sock(sk); 1240 bh_lock_sock_nested(sk);
1241 ret = 0; 1241 ret = 0;
1242 if (!sock_owned_by_user(sk)) { 1242 if (!sock_owned_by_user(sk)) {
1243#ifdef CONFIG_NET_DMA 1243#ifdef CONFIG_NET_DMA
@@ -1283,18 +1283,17 @@ discard_and_relse:
1283 1283
1284do_time_wait: 1284do_time_wait:
1285 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1285 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1286 inet_twsk_put((struct inet_timewait_sock *)sk); 1286 inet_twsk_put(inet_twsk(sk));
1287 goto discard_it; 1287 goto discard_it;
1288 } 1288 }
1289 1289
1290 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1290 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1291 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1291 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1292 inet_twsk_put((struct inet_timewait_sock *)sk); 1292 inet_twsk_put(inet_twsk(sk));
1293 goto discard_it; 1293 goto discard_it;
1294 } 1294 }
1295 1295
1296 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1296 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1297 skb, th)) {
1298 case TCP_TW_SYN: 1297 case TCP_TW_SYN:
1299 { 1298 {
1300 struct sock *sk2; 1299 struct sock *sk2;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9662561701d1..e0c3934a7e4b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -546,7 +546,7 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
546 struct in6_addr *daddr, *final_p = NULL, final; 546 struct in6_addr *daddr, *final_p = NULL, final;
547 struct ipv6_txoptions *opt = NULL; 547 struct ipv6_txoptions *opt = NULL;
548 struct ip6_flowlabel *flowlabel = NULL; 548 struct ip6_flowlabel *flowlabel = NULL;
549 struct flowi *fl = &inet->cork.fl; 549 struct flowi fl;
550 struct dst_entry *dst; 550 struct dst_entry *dst;
551 int addr_len = msg->msg_namelen; 551 int addr_len = msg->msg_namelen;
552 int ulen = len; 552 int ulen = len;
@@ -626,19 +626,19 @@ do_udp_sendmsg:
626 } 626 }
627 ulen += sizeof(struct udphdr); 627 ulen += sizeof(struct udphdr);
628 628
629 memset(fl, 0, sizeof(*fl)); 629 memset(&fl, 0, sizeof(fl));
630 630
631 if (sin6) { 631 if (sin6) {
632 if (sin6->sin6_port == 0) 632 if (sin6->sin6_port == 0)
633 return -EINVAL; 633 return -EINVAL;
634 634
635 fl->fl_ip_dport = sin6->sin6_port; 635 fl.fl_ip_dport = sin6->sin6_port;
636 daddr = &sin6->sin6_addr; 636 daddr = &sin6->sin6_addr;
637 637
638 if (np->sndflow) { 638 if (np->sndflow) {
639 fl->fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 639 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
640 if (fl->fl6_flowlabel&IPV6_FLOWLABEL_MASK) { 640 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
641 flowlabel = fl6_sock_lookup(sk, fl->fl6_flowlabel); 641 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
642 if (flowlabel == NULL) 642 if (flowlabel == NULL)
643 return -EINVAL; 643 return -EINVAL;
644 daddr = &flowlabel->dst; 644 daddr = &flowlabel->dst;
@@ -656,32 +656,32 @@ do_udp_sendmsg:
656 if (addr_len >= sizeof(struct sockaddr_in6) && 656 if (addr_len >= sizeof(struct sockaddr_in6) &&
657 sin6->sin6_scope_id && 657 sin6->sin6_scope_id &&
658 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) 658 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
659 fl->oif = sin6->sin6_scope_id; 659 fl.oif = sin6->sin6_scope_id;
660 } else { 660 } else {
661 if (sk->sk_state != TCP_ESTABLISHED) 661 if (sk->sk_state != TCP_ESTABLISHED)
662 return -EDESTADDRREQ; 662 return -EDESTADDRREQ;
663 663
664 fl->fl_ip_dport = inet->dport; 664 fl.fl_ip_dport = inet->dport;
665 daddr = &np->daddr; 665 daddr = &np->daddr;
666 fl->fl6_flowlabel = np->flow_label; 666 fl.fl6_flowlabel = np->flow_label;
667 connected = 1; 667 connected = 1;
668 } 668 }
669 669
670 if (!fl->oif) 670 if (!fl.oif)
671 fl->oif = sk->sk_bound_dev_if; 671 fl.oif = sk->sk_bound_dev_if;
672 672
673 if (msg->msg_controllen) { 673 if (msg->msg_controllen) {
674 opt = &opt_space; 674 opt = &opt_space;
675 memset(opt, 0, sizeof(struct ipv6_txoptions)); 675 memset(opt, 0, sizeof(struct ipv6_txoptions));
676 opt->tot_len = sizeof(*opt); 676 opt->tot_len = sizeof(*opt);
677 677
678 err = datagram_send_ctl(msg, fl, opt, &hlimit, &tclass); 678 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
679 if (err < 0) { 679 if (err < 0) {
680 fl6_sock_release(flowlabel); 680 fl6_sock_release(flowlabel);
681 return err; 681 return err;
682 } 682 }
683 if ((fl->fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 683 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
684 flowlabel = fl6_sock_lookup(sk, fl->fl6_flowlabel); 684 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
685 if (flowlabel == NULL) 685 if (flowlabel == NULL)
686 return -EINVAL; 686 return -EINVAL;
687 } 687 }
@@ -695,39 +695,39 @@ do_udp_sendmsg:
695 opt = fl6_merge_options(&opt_space, flowlabel, opt); 695 opt = fl6_merge_options(&opt_space, flowlabel, opt);
696 opt = ipv6_fixup_options(&opt_space, opt); 696 opt = ipv6_fixup_options(&opt_space, opt);
697 697
698 fl->proto = IPPROTO_UDP; 698 fl.proto = IPPROTO_UDP;
699 ipv6_addr_copy(&fl->fl6_dst, daddr); 699 ipv6_addr_copy(&fl.fl6_dst, daddr);
700 if (ipv6_addr_any(&fl->fl6_src) && !ipv6_addr_any(&np->saddr)) 700 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
701 ipv6_addr_copy(&fl->fl6_src, &np->saddr); 701 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
702 fl->fl_ip_sport = inet->sport; 702 fl.fl_ip_sport = inet->sport;
703 703
704 /* merge ip6_build_xmit from ip6_output */ 704 /* merge ip6_build_xmit from ip6_output */
705 if (opt && opt->srcrt) { 705 if (opt && opt->srcrt) {
706 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 706 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
707 ipv6_addr_copy(&final, &fl->fl6_dst); 707 ipv6_addr_copy(&final, &fl.fl6_dst);
708 ipv6_addr_copy(&fl->fl6_dst, rt0->addr); 708 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
709 final_p = &final; 709 final_p = &final;
710 connected = 0; 710 connected = 0;
711 } 711 }
712 712
713 if (!fl->oif && ipv6_addr_is_multicast(&fl->fl6_dst)) { 713 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
714 fl->oif = np->mcast_oif; 714 fl.oif = np->mcast_oif;
715 connected = 0; 715 connected = 0;
716 } 716 }
717 717
718 security_sk_classify_flow(sk, fl); 718 security_sk_classify_flow(sk, &fl);
719 719
720 err = ip6_sk_dst_lookup(sk, &dst, fl); 720 err = ip6_sk_dst_lookup(sk, &dst, &fl);
721 if (err) 721 if (err)
722 goto out; 722 goto out;
723 if (final_p) 723 if (final_p)
724 ipv6_addr_copy(&fl->fl6_dst, final_p); 724 ipv6_addr_copy(&fl.fl6_dst, final_p);
725 725
726 if ((err = xfrm_lookup(&dst, fl, sk, 0)) < 0) 726 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
727 goto out; 727 goto out;
728 728
729 if (hlimit < 0) { 729 if (hlimit < 0) {
730 if (ipv6_addr_is_multicast(&fl->fl6_dst)) 730 if (ipv6_addr_is_multicast(&fl.fl6_dst))
731 hlimit = np->mcast_hops; 731 hlimit = np->mcast_hops;
732 else 732 else
733 hlimit = np->hop_limit; 733 hlimit = np->hop_limit;
@@ -763,21 +763,23 @@ back_from_confirm:
763do_append_data: 763do_append_data:
764 up->len += ulen; 764 up->len += ulen;
765 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, 765 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
766 sizeof(struct udphdr), hlimit, tclass, opt, fl, 766 sizeof(struct udphdr), hlimit, tclass, opt, &fl,
767 (struct rt6_info*)dst, 767 (struct rt6_info*)dst,
768 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 768 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
769 if (err) 769 if (err)
770 udp_v6_flush_pending_frames(sk); 770 udp_v6_flush_pending_frames(sk);
771 else if (!corkreq) 771 else if (!corkreq)
772 err = udp_v6_push_pending_frames(sk, up); 772 err = udp_v6_push_pending_frames(sk, up);
773 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
774 up->pending = 0;
773 775
774 if (dst) { 776 if (dst) {
775 if (connected) { 777 if (connected) {
776 ip6_dst_store(sk, dst, 778 ip6_dst_store(sk, dst,
777 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? 779 ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
778 &np->daddr : NULL, 780 &np->daddr : NULL,
779#ifdef CONFIG_IPV6_SUBTREES 781#ifdef CONFIG_IPV6_SUBTREES
780 ipv6_addr_equal(&fl->fl6_src, &np->saddr) ? 782 ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
781 &np->saddr : 783 &np->saddr :
782#endif 784#endif
783 NULL); 785 NULL);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index a40a05789013..5c8b7a568800 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -16,10 +16,10 @@
16#include <net/ipv6.h> 16#include <net/ipv6.h>
17#include <net/xfrm.h> 17#include <net/xfrm.h>
18 18
19int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi) 19int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
20{ 20{
21 int err; 21 int err;
22 u32 seq; 22 __be32 seq;
23 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; 23 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
24 struct xfrm_state *x; 24 struct xfrm_state *x;
25 int xfrm_nr = 0; 25 int xfrm_nr = 0;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
new file mode 100644
index 000000000000..edcfffa9e87b
--- /dev/null
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -0,0 +1,107 @@
1/*
2 * xfrm6_mode_beet.c - BEET mode encapsulation for IPv6.
3 *
4 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
5 * Miika Komu <miika@iki.fi>
6 * Herbert Xu <herbert@gondor.apana.org.au>
7 * Abhinav Pathak <abhinav.pathak@hiit.fi>
8 * Jeff Ahrenholz <ahrenholz@gmail.com>
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/stringify.h>
16#include <net/dsfield.h>
17#include <net/dst.h>
18#include <net/inet_ecn.h>
19#include <net/ipv6.h>
20#include <net/xfrm.h>
21
22/* Add encapsulation header.
23 *
24 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
25 * The following fields in it shall be filled in by x->type->output:
26 * payload_len
27 *
28 * On exit, skb->h will be set to the start of the encapsulation header to be
29 * filled in by x->type->output and skb->nh will be set to the nextheader field
30 * of the extension header directly preceding the encapsulation header, or in
31 * its absence, that of the top IP header. The value of skb->data will always
32 * point to the top IP header.
33 */
34static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
35{
36 struct ipv6hdr *iph, *top_iph;
37 u8 *prevhdr;
38 int hdr_len;
39
40 skb_push(skb, x->props.header_len);
41 iph = skb->nh.ipv6h;
42
43 hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
44 skb->nh.raw = prevhdr - x->props.header_len;
45 skb->h.raw = skb->data + hdr_len;
46 memmove(skb->data, iph, hdr_len);
47
48 skb->nh.raw = skb->data;
49 top_iph = skb->nh.ipv6h;
50 skb->nh.raw = &top_iph->nexthdr;
51 skb->h.ipv6h = top_iph + 1;
52
53 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
54 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
55
56 return 0;
57}
58
59static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
60{
61 struct ipv6hdr *ip6h;
62 int size = sizeof(struct ipv6hdr);
63 int err = -EINVAL;
64
65 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
66 goto out;
67
68 skb_push(skb, size);
69 memmove(skb->data, skb->nh.raw, size);
70 skb->nh.raw = skb->data;
71
72 skb->mac.raw = memmove(skb->data - skb->mac_len,
73 skb->mac.raw, skb->mac_len);
74
75 ip6h = skb->nh.ipv6h;
76 ip6h->payload_len = htons(skb->len - size);
77 ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
78 ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
79 err = 0;
80out:
81 return err;
82}
83
84static struct xfrm_mode xfrm6_beet_mode = {
85 .input = xfrm6_beet_input,
86 .output = xfrm6_beet_output,
87 .owner = THIS_MODULE,
88 .encap = XFRM_MODE_BEET,
89};
90
91static int __init xfrm6_beet_init(void)
92{
93 return xfrm_register_mode(&xfrm6_beet_mode, AF_INET6);
94}
95
96static void __exit xfrm6_beet_exit(void)
97{
98 int err;
99
100 err = xfrm_unregister_mode(&xfrm6_beet_mode, AF_INET6);
101 BUG_ON(err);
102}
103
104module_init(xfrm6_beet_init);
105module_exit(xfrm6_beet_exit);
106MODULE_LICENSE("GPL");
107MODULE_ALIAS_XFRM_MODE(AF_INET6, XFRM_MODE_BEET);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6a252e2134d1..d400f8fae129 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -25,12 +25,14 @@
25static struct dst_ops xfrm6_dst_ops; 25static struct dst_ops xfrm6_dst_ops;
26static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 26static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
27 27
28static int xfrm6_dst_lookup(struct xfrm_dst **dst, struct flowi *fl) 28static int xfrm6_dst_lookup(struct xfrm_dst **xdst, struct flowi *fl)
29{ 29{
30 int err = 0; 30 struct dst_entry *dst = ip6_route_output(NULL, fl);
31 *dst = (struct xfrm_dst*)ip6_route_output(NULL, fl); 31 int err = dst->error;
32 if (!*dst) 32 if (!err)
33 err = -ENETUNREACH; 33 *xdst = (struct xfrm_dst *) dst;
34 else
35 dst_release(dst);
34 return err; 36 return err;
35} 37}
36 38
@@ -73,7 +75,7 @@ __xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
73 xdst->u.rt6.rt6i_src.plen); 75 xdst->u.rt6.rt6i_src.plen);
74 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && 76 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
75 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && 77 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
76 xfrm_bundle_ok(xdst, fl, AF_INET6, 78 xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
77 (xdst->u.rt6.rt6i_dst.plen != 128 || 79 (xdst->u.rt6.rt6i_dst.plen != 128 ||
78 xdst->u.rt6.rt6i_src.plen != 128))) { 80 xdst->u.rt6.rt6i_src.plen != 128))) {
79 dst_clone(dst); 81 dst_clone(dst);
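The first xfrm6_policy.c hunk above changes the failure check in xfrm6_dst_lookup(): ip6_route_output() does not return NULL, it returns a routing entry whose error field records the lookup result, so the old !*dst test could never fire. The new code reads dst->error, hands the reference to the caller only on success, and drops it with dst_release() otherwise. A hedged restatement of that pattern with the reasoning as comments (kernel-style sketch mirroring the hunk, not a standalone program):

static int xfrm6_dst_lookup(struct xfrm_dst **xdst, struct flowi *fl)
{
        /* ip6_route_output() always returns a dst_entry; failures are
         * reported through dst->error, not through a NULL pointer. */
        struct dst_entry *dst = ip6_route_output(NULL, fl);
        int err = dst->error;

        if (!err)
                *xdst = (struct xfrm_dst *) dst;  /* caller now owns the reference */
        else
                dst_release(dst);                 /* a failed lookup still holds a reference */
        return err;
}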
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 711bfafb2472..9ddaa9d41539 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -29,9 +29,9 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
29 ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); 29 ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
30 ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); 30 ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
31 x->sel.dport = xfrm_flowi_dport(fl); 31 x->sel.dport = xfrm_flowi_dport(fl);
32 x->sel.dport_mask = ~0; 32 x->sel.dport_mask = htons(0xffff);
33 x->sel.sport = xfrm_flowi_sport(fl); 33 x->sel.sport = xfrm_flowi_sport(fl);
34 x->sel.sport_mask = ~0; 34 x->sel.sport_mask = htons(0xffff);
35 x->sel.prefixlen_d = 128; 35 x->sel.prefixlen_d = 128;
36 x->sel.prefixlen_s = 128; 36 x->sel.prefixlen_s = 128;
37 x->sel.proto = fl->proto; 37 x->sel.proto = fl->proto;
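The selector port masks above change from ~0 to htons(0xffff) (the same change appears in af_key.c further down): the fields are 16-bit big-endian quantities, and while ~0 truncates to the identical all-ones pattern, spelling it htons(0xffff) keeps the value annotated as network byte order for sparse's __be16 checks. A small standalone C check of that equivalence (plain userspace code, not part of the patch):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t mask_tilde = ~0;            /* int -1 truncated to 0xffff */
        uint16_t mask_htons = htons(0xffff); /* all-ones is the same in either byte order */

        /* both spellings store the identical wildcard port mask */
        printf("~0 -> 0x%04x, htons(0xffff) -> 0x%04x\n", mask_tilde, mask_htons);
        return 0;
}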
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 59685ee8f700..7af227bb1551 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -258,7 +258,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
258static int xfrm6_tunnel_rcv(struct sk_buff *skb) 258static int xfrm6_tunnel_rcv(struct sk_buff *skb)
259{ 259{
260 struct ipv6hdr *iph = skb->nh.ipv6h; 260 struct ipv6hdr *iph = skb->nh.ipv6h;
261 u32 spi; 261 __be32 spi;
262 262
263 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); 263 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
264 return xfrm6_rcv_spi(skb, spi); 264 return xfrm6_rcv_spi(skb, spi);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 17699eeb64d7..7e1aea89ef05 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -132,13 +132,14 @@ static void irda_disconnect_indication(void *instance, void *sap,
132 132
133 /* Prevent race conditions with irda_release() and irda_shutdown() */ 133 /* Prevent race conditions with irda_release() and irda_shutdown() */
134 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) { 134 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
135 lock_sock(sk);
135 sk->sk_state = TCP_CLOSE; 136 sk->sk_state = TCP_CLOSE;
136 sk->sk_err = ECONNRESET; 137 sk->sk_err = ECONNRESET;
137 sk->sk_shutdown |= SEND_SHUTDOWN; 138 sk->sk_shutdown |= SEND_SHUTDOWN;
138 139
139 sk->sk_state_change(sk); 140 sk->sk_state_change(sk);
140 /* Uh-oh... Should use sock_orphan ? */ 141 sock_orphan(sk);
141 sock_set_flag(sk, SOCK_DEAD); 142 release_sock(sk);
142 143
143 /* Close our TSAP. 144 /* Close our TSAP.
144 * If we leave it open, IrLMP put it back into the list of 145 * If we leave it open, IrLMP put it back into the list of
@@ -308,7 +309,8 @@ static void irda_connect_response(struct irda_sock *self)
308 309
309 IRDA_ASSERT(self != NULL, return;); 310 IRDA_ASSERT(self != NULL, return;);
310 311
311 skb = alloc_skb(64, GFP_ATOMIC); 312 skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
313 GFP_ATOMIC);
312 if (skb == NULL) { 314 if (skb == NULL) {
313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 315 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
314 __FUNCTION__); 316 __FUNCTION__);
@@ -1212,6 +1214,7 @@ static int irda_release(struct socket *sock)
1212 if (sk == NULL) 1214 if (sk == NULL)
1213 return 0; 1215 return 0;
1214 1216
1217 lock_sock(sk);
1215 sk->sk_state = TCP_CLOSE; 1218 sk->sk_state = TCP_CLOSE;
1216 sk->sk_shutdown |= SEND_SHUTDOWN; 1219 sk->sk_shutdown |= SEND_SHUTDOWN;
1217 sk->sk_state_change(sk); 1220 sk->sk_state_change(sk);
@@ -1221,6 +1224,7 @@ static int irda_release(struct socket *sock)
1221 1224
1222 sock_orphan(sk); 1225 sock_orphan(sk);
1223 sock->sk = NULL; 1226 sock->sk = NULL;
1227 release_sock(sk);
1224 1228
1225 /* Purge queues (see sock_init_data()) */ 1229 /* Purge queues (see sock_init_data()) */
1226 skb_queue_purge(&sk->sk_receive_queue); 1230 skb_queue_purge(&sk->sk_receive_queue);
@@ -1353,6 +1357,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1353 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1357 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1354 1358
1355 IRDA_ASSERT(self != NULL, return -1;); 1359 IRDA_ASSERT(self != NULL, return -1;);
1360 IRDA_ASSERT(!sock_error(sk), return -1;);
1356 1361
1357 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1362 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1358 flags & MSG_DONTWAIT, &err); 1363 flags & MSG_DONTWAIT, &err);
@@ -1405,6 +1410,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1405 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1410 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1406 1411
1407 IRDA_ASSERT(self != NULL, return -1;); 1412 IRDA_ASSERT(self != NULL, return -1;);
1413 IRDA_ASSERT(!sock_error(sk), return -1;);
1408 1414
1409 if (sock->flags & __SO_ACCEPTCON) 1415 if (sock->flags & __SO_ACCEPTCON)
1410 return(-EINVAL); 1416 return(-EINVAL);
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 959874b6451f..c8e0d89ee11f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
81 81
82 /* Any userdata supplied? */ 82 /* Any userdata supplied? */
83 if (userdata == NULL) { 83 if (userdata == NULL) {
84 tx_skb = alloc_skb(64, GFP_ATOMIC); 84 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
85 if (!tx_skb) 85 if (!tx_skb)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
@@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
116 116
117 if (!userdata) { 117 if (!userdata) {
118 tx_skb = alloc_skb(64, GFP_ATOMIC); 118 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
119 if (!tx_skb) 119 if (!tx_skb)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 3bcdb467efc5..d50a02030ad7 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -79,7 +79,7 @@ static struct tty_driver *driver;
79 79
80hashbin_t *ircomm_tty = NULL; 80hashbin_t *ircomm_tty = NULL;
81 81
82static struct tty_operations ops = { 82static const struct tty_operations ops = {
83 .open = ircomm_tty_open, 83 .open = ircomm_tty_open,
84 .close = ircomm_tty_close, 84 .close = ircomm_tty_close,
85 .write = ircomm_tty_write, 85 .write = ircomm_tty_write,
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 61128aa05b40..415cf4eec23b 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -345,10 +345,11 @@ static void iriap_disconnect_request(struct iriap_cb *self)
345 IRDA_ASSERT(self != NULL, return;); 345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
347 347
348 tx_skb = alloc_skb(64, GFP_ATOMIC); 348 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
349 if (tx_skb == NULL) { 349 if (tx_skb == NULL) {
350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 350 IRDA_DEBUG(0,
351 __FUNCTION__, 64); 351 "%s(), Could not allocate an sk_buff of length %d\n",
352 __FUNCTION__, LMP_MAX_HEADER);
352 return; 353 return;
353 } 354 }
354 355
@@ -701,7 +702,7 @@ void iriap_send_ack(struct iriap_cb *self)
701 IRDA_ASSERT(self != NULL, return;); 702 IRDA_ASSERT(self != NULL, return;);
702 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 703 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
703 704
704 tx_skb = alloc_skb(64, GFP_ATOMIC); 705 tx_skb = alloc_skb(LMP_MAX_HEADER + 1, GFP_ATOMIC);
705 if (!tx_skb) 706 if (!tx_skb)
706 return; 707 return;
707 708
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index da17395df05a..99b18dc7a0b7 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
365 365
366 switch (event) { 366 switch (event) {
367 case IAP_LM_CONNECT_INDICATION: 367 case IAP_LM_CONNECT_INDICATION:
368 tx_skb = alloc_skb(64, GFP_ATOMIC); 368 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
369 if (tx_skb == NULL) { 369 if (tx_skb == NULL) {
370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);
371 return; 371 return;
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index a154b1d71c0f..56292ab7d652 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -43,7 +43,7 @@ struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}};
43 * 43 *
44 * Faster, check boundary... Jean II 44 * Faster, check boundary... Jean II
45 */ 45 */
46static char *strndup(char *str, int max) 46static char *strndup(char *str, size_t max)
47{ 47{
48 char *new_str; 48 char *new_str;
49 int len; 49 int len;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 7dd0a2fe1d20..9b962f247714 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -636,7 +636,8 @@ void irlan_get_provider_info(struct irlan_cb *self)
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
638 638
639 skb = alloc_skb(64, GFP_ATOMIC); 639 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER,
640 GFP_ATOMIC);
640 if (!skb) 641 if (!skb)
641 return; 642 return;
642 643
@@ -668,7 +669,10 @@ void irlan_open_data_channel(struct irlan_cb *self)
668 IRDA_ASSERT(self != NULL, return;); 669 IRDA_ASSERT(self != NULL, return;);
669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 670 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
670 671
671 skb = alloc_skb(64, GFP_ATOMIC); 672 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
673 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3") +
674 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "DIRECT"),
675 GFP_ATOMIC);
672 if (!skb) 676 if (!skb)
673 return; 677 return;
674 678
@@ -704,7 +708,9 @@ void irlan_close_data_channel(struct irlan_cb *self)
704 if (self->client.tsap_ctrl == NULL) 708 if (self->client.tsap_ctrl == NULL)
705 return; 709 return;
706 710
707 skb = alloc_skb(64, GFP_ATOMIC); 711 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
712 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN"),
713 GFP_ATOMIC);
708 if (!skb) 714 if (!skb)
709 return; 715 return;
710 716
@@ -715,7 +721,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
715 721
716 /* Build frame */ 722 /* Build frame */
717 frame[0] = CMD_CLOSE_DATA_CHAN; 723 frame[0] = CMD_CLOSE_DATA_CHAN;
718 frame[1] = 0x01; /* Two parameters */ 724 frame[1] = 0x01; /* One parameter */
719 725
720 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data); 726 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
721 727
@@ -739,7 +745,11 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
739 IRDA_ASSERT(self != NULL, return;); 745 IRDA_ASSERT(self != NULL, return;);
740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 746 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
741 747
742 skb = alloc_skb(128, GFP_ATOMIC); 748 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
749 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
750 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
751 IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "FILTER"),
752 GFP_ATOMIC);
743 if (!skb) 753 if (!skb)
744 return; 754 return;
745 755
@@ -777,7 +787,12 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
777 IRDA_ASSERT(self != NULL, return;); 787 IRDA_ASSERT(self != NULL, return;);
778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 788 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
779 789
780 skb = alloc_skb(128, GFP_ATOMIC); 790 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
791 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
792 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
793 /* We may waste one byte here...*/
794 IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "FILTER"),
795 GFP_ATOMIC);
781 if (!skb) 796 if (!skb)
782 return; 797 return;
783 798
@@ -816,7 +831,12 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
816 IRDA_ASSERT(self != NULL, return;); 831 IRDA_ASSERT(self != NULL, return;);
817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 832 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
818 833
819 skb = alloc_skb(128, GFP_ATOMIC); 834 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
835 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
836 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
837 /* We may waste one byte here...*/
838 IRLAN_STRING_PARAMETER_LEN("FILTER_MODE", "NONE"),
839 GFP_ATOMIC);
820 if (!skb) 840 if (!skb)
821 return; 841 return;
822 842
@@ -856,7 +876,12 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
856 IRDA_ASSERT(self != NULL, return;); 876 IRDA_ASSERT(self != NULL, return;);
857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 877 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
858 878
859 skb = alloc_skb(128, GFP_ATOMIC); 879 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
880 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
881 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
882 IRLAN_STRING_PARAMETER_LEN("FILTER_OPERATION",
883 "DYNAMIC"),
884 GFP_ATOMIC);
860 if (!skb) 885 if (!skb)
861 return; 886 return;
862 887
@@ -891,7 +916,10 @@ void irlan_get_media_char(struct irlan_cb *self)
891 IRDA_ASSERT(self != NULL, return;); 916 IRDA_ASSERT(self != NULL, return;);
892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 917 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
893 918
894 skb = alloc_skb(64, GFP_ATOMIC); 919 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
920 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3"),
921 GFP_ATOMIC);
922
895 if (!skb) 923 if (!skb)
896 return; 924 return;
897 925
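The irlan_common.c hunks above, like the IrDA changes that follow, replace guessed alloc_skb(64)/alloc_skb(128) sizes with the sum of the protocol header macros plus the exact parameters each command frame carries, so the buffer bound is derived rather than hard-coded. A minimal userspace sketch of that arithmetic; the macro names and the assumed parameter encoding (length-prefixed name and value) are illustrative stand-ins, not the kernel definitions:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's IRLAN_* macros (values assumed). */
#define EX_MAX_HEADER   27      /* assumed worst-case TTP/LMP/LAP headers */
#define EX_CMD_HEADER   2       /* assumed command code + parameter count */
/* assume a string parameter costs: 1-byte name length, name,
 * 2-byte value length, value */
#define EX_STRING_PARAM_LEN(name, value) \
        (1 + strlen(name) + 2 + strlen(value))

int main(void)
{
        size_t need = EX_MAX_HEADER + EX_CMD_HEADER +
                      EX_STRING_PARAM_LEN("MEDIA", "802.3") +
                      EX_STRING_PARAM_LEN("ACCESS_TYPE", "DIRECT");

        /* the derived bound is what replaces the old magic alloc_skb(64) */
        printf("worst-case frame size: %zu bytes\n", need);
        return 0;
}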
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 9c0df86044d7..58efde919667 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,14 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
296 IRDA_ASSERT(self != NULL, return;); 296 IRDA_ASSERT(self != NULL, return;);
297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
298 298
299 skb = alloc_skb(128, GFP_ATOMIC); 299 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
300 /* Bigger param length comes from CMD_GET_MEDIA_CHAR */
301 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
 302 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
303 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
304 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
305 GFP_ATOMIC);
306
300 if (!skb) 307 if (!skb)
301 return; 308 return;
302 309
@@ -354,8 +361,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
354 } else 361 } else
355 skb->data[1] = 0x02; /* 2 parameters */ 362 skb->data[1] = 0x02; /* 2 parameters */
356 irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data); 363 irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data);
357 irlan_insert_array_param(skb, "RECONNECT_KEY", "LINUX RULES!", 364 irlan_insert_string_param(skb, "RECONNECT_KEY", "LINUX RULES!");
358 12);
359 break; 365 break;
360 case CMD_FILTER_OPERATION: 366 case CMD_FILTER_OPERATION:
361 irlan_filter_request(self, skb); 367 irlan_filter_request(self, skb);
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index ccb983bf0f4a..dba349c832d0 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,9 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
118 118
119 /* Allocate frame */ 119 /* Allocate frame */
120 tx_skb = alloc_skb(64, GFP_ATOMIC); 120 tx_skb = alloc_skb(sizeof(struct snrm_frame) +
121 IRLAP_NEGOCIATION_PARAMS_LEN,
122 GFP_ATOMIC);
121 if (!tx_skb) 123 if (!tx_skb)
122 return; 124 return;
123 125
@@ -136,7 +138,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
 136 * If we are establishing a connection then insert QoS parameters 138
137 */ 139 */
138 if (qos) { 140 if (qos) {
139 skb_put(tx_skb, 9); /* 21 left */ 141 skb_put(tx_skb, 9); /* 25 left */
140 frame->saddr = cpu_to_le32(self->saddr); 142 frame->saddr = cpu_to_le32(self->saddr);
141 frame->daddr = cpu_to_le32(self->daddr); 143 frame->daddr = cpu_to_le32(self->daddr);
142 144
@@ -210,7 +212,9 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 212 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
211 213
212 /* Allocate frame */ 214 /* Allocate frame */
213 tx_skb = alloc_skb(64, GFP_ATOMIC); 215 tx_skb = alloc_skb(sizeof(struct ua_frame) +
216 IRLAP_NEGOCIATION_PARAMS_LEN,
217 GFP_ATOMIC);
214 if (!tx_skb) 218 if (!tx_skb)
215 return; 219 return;
216 220
@@ -245,23 +249,23 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
245void irlap_send_dm_frame( struct irlap_cb *self) 249void irlap_send_dm_frame( struct irlap_cb *self)
246{ 250{
247 struct sk_buff *tx_skb = NULL; 251 struct sk_buff *tx_skb = NULL;
248 __u8 *frame; 252 struct dm_frame *frame;
249 253
250 IRDA_ASSERT(self != NULL, return;); 254 IRDA_ASSERT(self != NULL, return;);
251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 255 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
252 256
253 tx_skb = alloc_skb(32, GFP_ATOMIC); 257 tx_skb = alloc_skb(sizeof(struct dm_frame), GFP_ATOMIC);
254 if (!tx_skb) 258 if (!tx_skb)
255 return; 259 return;
256 260
257 frame = skb_put(tx_skb, 2); 261 frame = (struct dm_frame *)skb_put(tx_skb, 2);
258 262
259 if (self->state == LAP_NDM) 263 if (self->state == LAP_NDM)
260 frame[0] = CBROADCAST; 264 frame->caddr = CBROADCAST;
261 else 265 else
262 frame[0] = self->caddr; 266 frame->caddr = self->caddr;
263 267
264 frame[1] = DM_RSP | PF_BIT; 268 frame->control = DM_RSP | PF_BIT;
265 269
266 irlap_queue_xmit(self, tx_skb); 270 irlap_queue_xmit(self, tx_skb);
267} 271}
@@ -275,21 +279,21 @@ void irlap_send_dm_frame( struct irlap_cb *self)
275void irlap_send_disc_frame(struct irlap_cb *self) 279void irlap_send_disc_frame(struct irlap_cb *self)
276{ 280{
277 struct sk_buff *tx_skb = NULL; 281 struct sk_buff *tx_skb = NULL;
278 __u8 *frame; 282 struct disc_frame *frame;
279 283
280 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 284 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
281 285
282 IRDA_ASSERT(self != NULL, return;); 286 IRDA_ASSERT(self != NULL, return;);
283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 287 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
284 288
285 tx_skb = alloc_skb(16, GFP_ATOMIC); 289 tx_skb = alloc_skb(sizeof(struct disc_frame), GFP_ATOMIC);
286 if (!tx_skb) 290 if (!tx_skb)
287 return; 291 return;
288 292
289 frame = skb_put(tx_skb, 2); 293 frame = (struct disc_frame *)skb_put(tx_skb, 2);
290 294
291 frame[0] = self->caddr | CMD_FRAME; 295 frame->caddr = self->caddr | CMD_FRAME;
292 frame[1] = DISC_CMD | PF_BIT; 296 frame->control = DISC_CMD | PF_BIT;
293 297
294 irlap_queue_xmit(self, tx_skb); 298 irlap_queue_xmit(self, tx_skb);
295} 299}
@@ -315,7 +319,8 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 319 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316 IRDA_ASSERT(discovery != NULL, return;); 320 IRDA_ASSERT(discovery != NULL, return;);
317 321
318 tx_skb = alloc_skb(64, GFP_ATOMIC); 322 tx_skb = alloc_skb(sizeof(struct xid_frame) + IRLAP_DISCOVERY_INFO_LEN,
323 GFP_ATOMIC);
319 if (!tx_skb) 324 if (!tx_skb)
320 return; 325 return;
321 326
@@ -573,18 +578,18 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
573void irlap_send_rr_frame(struct irlap_cb *self, int command) 578void irlap_send_rr_frame(struct irlap_cb *self, int command)
574{ 579{
575 struct sk_buff *tx_skb; 580 struct sk_buff *tx_skb;
576 __u8 *frame; 581 struct rr_frame *frame;
577 582
578 tx_skb = alloc_skb(16, GFP_ATOMIC); 583 tx_skb = alloc_skb(sizeof(struct rr_frame), GFP_ATOMIC);
579 if (!tx_skb) 584 if (!tx_skb)
580 return; 585 return;
581 586
582 frame = skb_put(tx_skb, 2); 587 frame = (struct rr_frame *)skb_put(tx_skb, 2);
583 588
584 frame[0] = self->caddr; 589 frame->caddr = self->caddr;
585 frame[0] |= (command) ? CMD_FRAME : 0; 590 frame->caddr |= (command) ? CMD_FRAME : 0;
586 591
587 frame[1] = RR | PF_BIT | (self->vr << 5); 592 frame->control = RR | PF_BIT | (self->vr << 5);
588 593
589 irlap_queue_xmit(self, tx_skb); 594 irlap_queue_xmit(self, tx_skb);
590} 595}
@@ -598,16 +603,16 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
598void irlap_send_rd_frame(struct irlap_cb *self) 603void irlap_send_rd_frame(struct irlap_cb *self)
599{ 604{
600 struct sk_buff *tx_skb; 605 struct sk_buff *tx_skb;
601 __u8 *frame; 606 struct rd_frame *frame;
602 607
603 tx_skb = alloc_skb(16, GFP_ATOMIC); 608 tx_skb = alloc_skb(sizeof(struct rd_frame), GFP_ATOMIC);
604 if (!tx_skb) 609 if (!tx_skb)
605 return; 610 return;
606 611
607 frame = skb_put(tx_skb, 2); 612 frame = (struct rd_frame *)skb_put(tx_skb, 2);
608 613
609 frame[0] = self->caddr; 614 frame->caddr = self->caddr;
 610 frame[1] = RD_RSP | PF_BIT; 615 frame->control = RD_RSP | PF_BIT;
611 616
612 irlap_queue_xmit(self, tx_skb); 617 irlap_queue_xmit(self, tx_skb);
613} 618}
@@ -1214,7 +1219,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
1214 struct test_frame *frame; 1219 struct test_frame *frame;
1215 __u8 *info; 1220 __u8 *info;
1216 1221
1217 tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC); 1222 tx_skb = alloc_skb(cmd->len + sizeof(struct test_frame), GFP_ATOMIC);
1218 if (!tx_skb) 1223 if (!tx_skb)
1219 return; 1224 return;
1220 1225
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index c440913dee14..5073261b9d0c 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -392,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
392 392
393 /* Any userdata? */ 393 /* Any userdata? */
394 if (tx_skb == NULL) { 394 if (tx_skb == NULL) {
395 tx_skb = alloc_skb(64, GFP_ATOMIC); 395 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
396 if (!tx_skb) 396 if (!tx_skb)
397 return -ENOMEM; 397 return -ENOMEM;
398 398
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 42acf1cde737..3c2e70b77df1 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -804,12 +804,12 @@ static inline void irttp_give_credit(struct tsap_cb *self)
804 self->send_credit, self->avail_credit, self->remote_credit); 804 self->send_credit, self->avail_credit, self->remote_credit);
805 805
806 /* Give credit to peer */ 806 /* Give credit to peer */
807 tx_skb = alloc_skb(64, GFP_ATOMIC); 807 tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
808 if (!tx_skb) 808 if (!tx_skb)
809 return; 809 return;
810 810
811 /* Reserve space for LMP, and LAP header */ 811 /* Reserve space for LMP, and LAP header */
812 skb_reserve(tx_skb, self->max_header_size); 812 skb_reserve(tx_skb, LMP_MAX_HEADER);
813 813
814 /* 814 /*
815 * Since we can transmit and receive frames concurrently, 815 * Since we can transmit and receive frames concurrently,
@@ -1093,7 +1093,8 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1093 1093
1094 /* Any userdata supplied? */ 1094 /* Any userdata supplied? */
1095 if (userdata == NULL) { 1095 if (userdata == NULL) {
1096 tx_skb = alloc_skb(64, GFP_ATOMIC); 1096 tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
1097 GFP_ATOMIC);
1097 if (!tx_skb) 1098 if (!tx_skb)
1098 return -ENOMEM; 1099 return -ENOMEM;
1099 1100
@@ -1341,7 +1342,8 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1341 1342
1342 /* Any userdata supplied? */ 1343 /* Any userdata supplied? */
1343 if (userdata == NULL) { 1344 if (userdata == NULL) {
1344 tx_skb = alloc_skb(64, GFP_ATOMIC); 1345 tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
1346 GFP_ATOMIC);
1345 if (!tx_skb) 1347 if (!tx_skb)
1346 return -ENOMEM; 1348 return -ENOMEM;
1347 1349
@@ -1540,14 +1542,14 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1540 1542
1541 if (!userdata) { 1543 if (!userdata) {
1542 struct sk_buff *tx_skb; 1544 struct sk_buff *tx_skb;
1543 tx_skb = alloc_skb(64, GFP_ATOMIC); 1545 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
1544 if (!tx_skb) 1546 if (!tx_skb)
1545 return -ENOMEM; 1547 return -ENOMEM;
1546 1548
1547 /* 1549 /*
1548 * Reserve space for MUX and LAP header 1550 * Reserve space for MUX and LAP header
1549 */ 1551 */
1550 skb_reserve(tx_skb, TTP_MAX_HEADER); 1552 skb_reserve(tx_skb, LMP_MAX_HEADER);
1551 1553
1552 userdata = tx_skb; 1554 userdata = tx_skb;
1553 } 1555 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 83b443ddc72f..20ff7cca1d07 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2140,7 +2140,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2140 xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2140 xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2141 xp->selector.sport = ((struct sockaddr_in *)(sa+1))->sin_port; 2141 xp->selector.sport = ((struct sockaddr_in *)(sa+1))->sin_port;
2142 if (xp->selector.sport) 2142 if (xp->selector.sport)
2143 xp->selector.sport_mask = ~0; 2143 xp->selector.sport_mask = htons(0xffff);
2144 2144
2145 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2145 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
2146 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); 2146 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
@@ -2153,7 +2153,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2153 2153
2154 xp->selector.dport = ((struct sockaddr_in *)(sa+1))->sin_port; 2154 xp->selector.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
2155 if (xp->selector.dport) 2155 if (xp->selector.dport)
2156 xp->selector.dport_mask = ~0; 2156 xp->selector.dport_mask = htons(0xffff);
2157 2157
2158 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1]; 2158 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
2159 if (sec_ctx != NULL) { 2159 if (sec_ctx != NULL) {
@@ -2243,7 +2243,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2243 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2243 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2244 sel.sport = ((struct sockaddr_in *)(sa+1))->sin_port; 2244 sel.sport = ((struct sockaddr_in *)(sa+1))->sin_port;
2245 if (sel.sport) 2245 if (sel.sport)
2246 sel.sport_mask = ~0; 2246 sel.sport_mask = htons(0xffff);
2247 2247
2248 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2248 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
2249 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2249 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
@@ -2251,7 +2251,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2251 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2251 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2252 sel.dport = ((struct sockaddr_in *)(sa+1))->sin_port; 2252 sel.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
2253 if (sel.dport) 2253 if (sel.dport)
2254 sel.dport_mask = ~0; 2254 sel.dport_mask = htons(0xffff);
2255 2255
2256 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1]; 2256 sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
2257 memset(&tmp, 0, sizeof(struct xfrm_policy)); 2257 memset(&tmp, 0, sizeof(struct xfrm_policy));
@@ -2928,11 +2928,6 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
2928 if (*dir) 2928 if (*dir)
2929 goto out; 2929 goto out;
2930 } 2930 }
2931 else {
2932 *dir = security_xfrm_sock_policy_alloc(xp, sk);
2933 if (*dir)
2934 goto out;
2935 }
2936 2931
2937 *dir = pol->sadb_x_policy_dir-1; 2932 *dir = pol->sadb_x_policy_dir-1;
2938 return xp; 2933 return xp;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0a28d2c5c44f..f619c6527266 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -209,7 +209,9 @@ config NETFILTER_XT_TARGET_SECMARK
209 209
210config NETFILTER_XT_TARGET_CONNSECMARK 210config NETFILTER_XT_TARGET_CONNSECMARK
211 tristate '"CONNSECMARK" target support' 211 tristate '"CONNSECMARK" target support'
212 depends on NETFILTER_XTABLES && (NF_CONNTRACK_SECMARK || IP_NF_CONNTRACK_SECMARK) 212 depends on NETFILTER_XTABLES && \
213 ((NF_CONNTRACK && NF_CONNTRACK_SECMARK) || \
214 (IP_NF_CONNTRACK && IP_NF_CONNTRACK_SECMARK))
213 help 215 help
214 The CONNSECMARK target copies security markings from packets 216 The CONNSECMARK target copies security markings from packets
215 to connections, and restores security markings from connections 217 to connections, and restores security markings from connections
@@ -365,7 +367,7 @@ config NETFILTER_XT_MATCH_MULTIPORT
365 367
366config NETFILTER_XT_MATCH_PHYSDEV 368config NETFILTER_XT_MATCH_PHYSDEV
367 tristate '"physdev" match support' 369 tristate '"physdev" match support'
368 depends on NETFILTER_XTABLES && BRIDGE_NETFILTER 370 depends on NETFILTER_XTABLES && BRIDGE && BRIDGE_NETFILTER
369 help 371 help
370 Physdev packet matching matches against the physical bridge ports 372 Physdev packet matching matches against the physical bridge ports
371 the IP packet arrived on or will leave by. 373 the IP packet arrived on or will leave by.
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1721f7c78c77..bd0156a28ecd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -47,13 +47,6 @@ MODULE_LICENSE("GPL");
47 47
48static char __initdata version[] = "0.93"; 48static char __initdata version[] = "0.93";
49 49
50#if 0
51#define DEBUGP printk
52#else
53#define DEBUGP(format, args...)
54#endif
55
56
57static inline int 50static inline int
58ctnetlink_dump_tuples_proto(struct sk_buff *skb, 51ctnetlink_dump_tuples_proto(struct sk_buff *skb,
59 const struct nf_conntrack_tuple *tuple, 52 const struct nf_conntrack_tuple *tuple,
@@ -410,7 +403,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
410{ 403{
411 if (cb->args[1]) 404 if (cb->args[1])
412 nf_ct_put((struct nf_conn *)cb->args[1]); 405 nf_ct_put((struct nf_conn *)cb->args[1]);
413 DEBUGP("entered %s\n", __FUNCTION__);
414 return 0; 406 return 0;
415} 407}
416 408
@@ -425,9 +417,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
425 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 417 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
426 u_int8_t l3proto = nfmsg->nfgen_family; 418 u_int8_t l3proto = nfmsg->nfgen_family;
427 419
428 DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__,
429 cb->args[0], *id);
430
431 read_lock_bh(&nf_conntrack_lock); 420 read_lock_bh(&nf_conntrack_lock);
432 last = (struct nf_conn *)cb->args[1]; 421 last = (struct nf_conn *)cb->args[1];
433 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 422 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
@@ -471,7 +460,6 @@ out:
471 if (last) 460 if (last)
472 nf_ct_put(last); 461 nf_ct_put(last);
473 462
474 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
475 return skb->len; 463 return skb->len;
476} 464}
477 465
@@ -482,8 +470,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct nf_conntrack_tuple *tuple)
482 struct nf_conntrack_l3proto *l3proto; 470 struct nf_conntrack_l3proto *l3proto;
483 int ret = 0; 471 int ret = 0;
484 472
485 DEBUGP("entered %s\n", __FUNCTION__);
486
487 nfattr_parse_nested(tb, CTA_IP_MAX, attr); 473 nfattr_parse_nested(tb, CTA_IP_MAX, attr);
488 474
489 l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); 475 l3proto = nf_ct_l3proto_find_get(tuple->src.l3num);
@@ -493,8 +479,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct nf_conntrack_tuple *tuple)
493 479
494 nf_ct_l3proto_put(l3proto); 480 nf_ct_l3proto_put(l3proto);
495 481
496 DEBUGP("leaving\n");
497
498 return ret; 482 return ret;
499} 483}
500 484
@@ -510,8 +494,6 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
510 struct nf_conntrack_protocol *proto; 494 struct nf_conntrack_protocol *proto;
511 int ret = 0; 495 int ret = 0;
512 496
513 DEBUGP("entered %s\n", __FUNCTION__);
514
515 nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); 497 nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
516 498
517 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) 499 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
@@ -538,8 +520,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple,
538 struct nfattr *tb[CTA_TUPLE_MAX]; 520 struct nfattr *tb[CTA_TUPLE_MAX];
539 int err; 521 int err;
540 522
541 DEBUGP("entered %s\n", __FUNCTION__);
542
543 memset(tuple, 0, sizeof(*tuple)); 523 memset(tuple, 0, sizeof(*tuple));
544 524
545 nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]); 525 nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
@@ -566,10 +546,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple,
566 else 546 else
567 tuple->dst.dir = IP_CT_DIR_ORIGINAL; 547 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
568 548
569 NF_CT_DUMP_TUPLE(tuple);
570
571 DEBUGP("leaving\n");
572
573 return 0; 549 return 0;
574} 550}
575 551
@@ -586,8 +562,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
586 struct nfattr *tb[CTA_PROTONAT_MAX]; 562 struct nfattr *tb[CTA_PROTONAT_MAX];
587 struct ip_nat_protocol *npt; 563 struct ip_nat_protocol *npt;
588 564
589 DEBUGP("entered %s\n", __FUNCTION__);
590
591 nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); 565 nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
592 566
593 if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) 567 if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
@@ -606,7 +580,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
606 580
607 ip_nat_proto_put(npt); 581 ip_nat_proto_put(npt);
608 582
609 DEBUGP("leaving\n");
610 return 0; 583 return 0;
611} 584}
612 585
@@ -622,8 +595,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
622 struct nfattr *tb[CTA_NAT_MAX]; 595 struct nfattr *tb[CTA_NAT_MAX];
623 int err; 596 int err;
624 597
625 DEBUGP("entered %s\n", __FUNCTION__);
626
627 memset(range, 0, sizeof(*range)); 598 memset(range, 0, sizeof(*range));
628 599
629 nfattr_parse_nested(tb, CTA_NAT_MAX, nat); 600 nfattr_parse_nested(tb, CTA_NAT_MAX, nat);
@@ -649,7 +620,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
649 if (err < 0) 620 if (err < 0)
650 return err; 621 return err;
651 622
652 DEBUGP("leaving\n");
653 return 0; 623 return 0;
654} 624}
655#endif 625#endif
@@ -659,8 +629,6 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
659{ 629{
660 struct nfattr *tb[CTA_HELP_MAX]; 630 struct nfattr *tb[CTA_HELP_MAX];
661 631
662 DEBUGP("entered %s\n", __FUNCTION__);
663
664 nfattr_parse_nested(tb, CTA_HELP_MAX, attr); 632 nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
665 633
666 if (!tb[CTA_HELP_NAME-1]) 634 if (!tb[CTA_HELP_NAME-1])
@@ -690,8 +658,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
690 u_int8_t u3 = nfmsg->nfgen_family; 658 u_int8_t u3 = nfmsg->nfgen_family;
691 int err = 0; 659 int err = 0;
692 660
693 DEBUGP("entered %s\n", __FUNCTION__);
694
695 if (nfattr_bad_size(cda, CTA_MAX, cta_min)) 661 if (nfattr_bad_size(cda, CTA_MAX, cta_min))
696 return -EINVAL; 662 return -EINVAL;
697 663
@@ -709,10 +675,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
709 return err; 675 return err;
710 676
711 h = nf_conntrack_find_get(&tuple, NULL); 677 h = nf_conntrack_find_get(&tuple, NULL);
712 if (!h) { 678 if (!h)
713 DEBUGP("tuple not found in conntrack hash\n");
714 return -ENOENT; 679 return -ENOENT;
715 }
716 680
717 ct = nf_ct_tuplehash_to_ctrack(h); 681 ct = nf_ct_tuplehash_to_ctrack(h);
718 682
@@ -727,7 +691,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
727 ct->timeout.function((unsigned long)ct); 691 ct->timeout.function((unsigned long)ct);
728 692
729 nf_ct_put(ct); 693 nf_ct_put(ct);
730 DEBUGP("leaving\n");
731 694
732 return 0; 695 return 0;
733} 696}
@@ -744,8 +707,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
744 u_int8_t u3 = nfmsg->nfgen_family; 707 u_int8_t u3 = nfmsg->nfgen_family;
745 int err = 0; 708 int err = 0;
746 709
747 DEBUGP("entered %s\n", __FUNCTION__);
748
749 if (nlh->nlmsg_flags & NLM_F_DUMP) { 710 if (nlh->nlmsg_flags & NLM_F_DUMP) {
750 u32 rlen; 711 u32 rlen;
751 712
@@ -779,11 +740,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
779 return err; 740 return err;
780 741
781 h = nf_conntrack_find_get(&tuple, NULL); 742 h = nf_conntrack_find_get(&tuple, NULL);
782 if (!h) { 743 if (!h)
783 DEBUGP("tuple not found in conntrack hash");
784 return -ENOENT; 744 return -ENOENT;
785 } 745
786 DEBUGP("tuple found\n");
787 ct = nf_ct_tuplehash_to_ctrack(h); 746 ct = nf_ct_tuplehash_to_ctrack(h);
788 747
789 err = -ENOMEM; 748 err = -ENOMEM;
@@ -804,7 +763,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
804 if (err < 0) 763 if (err < 0)
805 goto out; 764 goto out;
806 765
807 DEBUGP("leaving\n");
808 return 0; 766 return 0;
809 767
810free: 768free:
@@ -876,8 +834,6 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
876 char *helpname; 834 char *helpname;
877 int err; 835 int err;
878 836
879 DEBUGP("entered %s\n", __FUNCTION__);
880
881 if (!help) { 837 if (!help) {
882 /* FIXME: we need to reallocate and rehash */ 838 /* FIXME: we need to reallocate and rehash */
883 return -EBUSY; 839 return -EBUSY;
@@ -954,8 +910,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
954{ 910{
955 int err; 911 int err;
956 912
957 DEBUGP("entered %s\n", __FUNCTION__);
958
959 if (cda[CTA_HELP-1]) { 913 if (cda[CTA_HELP-1]) {
960 err = ctnetlink_change_helper(ct, cda); 914 err = ctnetlink_change_helper(ct, cda);
961 if (err < 0) 915 if (err < 0)
@@ -985,7 +939,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
985 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); 939 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
986#endif 940#endif
987 941
988 DEBUGP("all done\n");
989 return 0; 942 return 0;
990} 943}
991 944
@@ -997,8 +950,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
997 struct nf_conn *ct; 950 struct nf_conn *ct;
998 int err = -EINVAL; 951 int err = -EINVAL;
999 952
1000 DEBUGP("entered %s\n", __FUNCTION__);
1001
1002 ct = nf_conntrack_alloc(otuple, rtuple); 953 ct = nf_conntrack_alloc(otuple, rtuple);
1003 if (ct == NULL || IS_ERR(ct)) 954 if (ct == NULL || IS_ERR(ct))
1004 return -ENOMEM; 955 return -ENOMEM;
@@ -1028,7 +979,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
1028 add_timer(&ct->timeout); 979 add_timer(&ct->timeout);
1029 nf_conntrack_hash_insert(ct); 980 nf_conntrack_hash_insert(ct);
1030 981
1031 DEBUGP("conntrack with id %u inserted\n", ct->id);
1032 return 0; 982 return 0;
1033 983
1034err: 984err:
@@ -1046,8 +996,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1046 u_int8_t u3 = nfmsg->nfgen_family; 996 u_int8_t u3 = nfmsg->nfgen_family;
1047 int err = 0; 997 int err = 0;
1048 998
1049 DEBUGP("entered %s\n", __FUNCTION__);
1050
1051 if (nfattr_bad_size(cda, CTA_MAX, cta_min)) 999 if (nfattr_bad_size(cda, CTA_MAX, cta_min))
1052 return -EINVAL; 1000 return -EINVAL;
1053 1001
@@ -1071,7 +1019,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1071 1019
1072 if (h == NULL) { 1020 if (h == NULL) {
1073 write_unlock_bh(&nf_conntrack_lock); 1021 write_unlock_bh(&nf_conntrack_lock);
1074 DEBUGP("no such conntrack, create new\n");
1075 err = -ENOENT; 1022 err = -ENOENT;
1076 if (nlh->nlmsg_flags & NLM_F_CREATE) 1023 if (nlh->nlmsg_flags & NLM_F_CREATE)
1077 err = ctnetlink_create_conntrack(cda, &otuple, &rtuple); 1024 err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
@@ -1087,7 +1034,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1087 1034
1088 /* We manipulate the conntrack inside the global conntrack table lock, 1035 /* We manipulate the conntrack inside the global conntrack table lock,
1089 * so there's no need to increase the refcount */ 1036 * so there's no need to increase the refcount */
1090 DEBUGP("conntrack found\n");
1091 err = -EEXIST; 1037 err = -EEXIST;
1092 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) 1038 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1093 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), cda); 1039 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), cda);
@@ -1268,8 +1214,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1268 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 1214 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
1269 u_int8_t l3proto = nfmsg->nfgen_family; 1215 u_int8_t l3proto = nfmsg->nfgen_family;
1270 1216
1271 DEBUGP("entered %s, last id=%llu\n", __FUNCTION__, *id);
1272
1273 read_lock_bh(&nf_conntrack_lock); 1217 read_lock_bh(&nf_conntrack_lock);
1274 list_for_each_prev(i, &nf_conntrack_expect_list) { 1218 list_for_each_prev(i, &nf_conntrack_expect_list) {
1275 exp = (struct nf_conntrack_expect *) i; 1219 exp = (struct nf_conntrack_expect *) i;
@@ -1287,8 +1231,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1287out: 1231out:
1288 read_unlock_bh(&nf_conntrack_lock); 1232 read_unlock_bh(&nf_conntrack_lock);
1289 1233
1290 DEBUGP("leaving, last id=%llu\n", *id);
1291
1292 return skb->len; 1234 return skb->len;
1293} 1235}
1294 1236
@@ -1308,8 +1250,6 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1308 u_int8_t u3 = nfmsg->nfgen_family; 1250 u_int8_t u3 = nfmsg->nfgen_family;
1309 int err = 0; 1251 int err = 0;
1310 1252
1311 DEBUGP("entered %s\n", __FUNCTION__);
1312
1313 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) 1253 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
1314 return -EINVAL; 1254 return -EINVAL;
1315 1255
@@ -1460,8 +1400,6 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3)
1460 struct nf_conn_help *help; 1400 struct nf_conn_help *help;
1461 int err = 0; 1401 int err = 0;
1462 1402
1463 DEBUGP("entered %s\n", __FUNCTION__);
1464
1465 /* caller guarantees that those three CTA_EXPECT_* exist */ 1403 /* caller guarantees that those three CTA_EXPECT_* exist */
1466 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1404 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1467 if (err < 0) 1405 if (err < 0)
@@ -1516,8 +1454,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1516 u_int8_t u3 = nfmsg->nfgen_family; 1454 u_int8_t u3 = nfmsg->nfgen_family;
1517 int err = 0; 1455 int err = 0;
1518 1456
1519 DEBUGP("entered %s\n", __FUNCTION__);
1520
1521 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) 1457 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
1522 return -EINVAL; 1458 return -EINVAL;
1523 1459
@@ -1546,8 +1482,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1546 err = ctnetlink_change_expect(exp, cda); 1482 err = ctnetlink_change_expect(exp, cda);
1547 write_unlock_bh(&nf_conntrack_lock); 1483 write_unlock_bh(&nf_conntrack_lock);
1548 1484
1549 DEBUGP("leaving\n");
1550
1551 return err; 1485 return err;
1552} 1486}
1553 1487
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index db9b896e57c8..39e117502bd7 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -68,7 +68,7 @@ static int __init xt_nfqueue_init(void)
68 68
69static void __exit xt_nfqueue_fini(void) 69static void __exit xt_nfqueue_fini(void)
70{ 70{
71 xt_register_targets(xt_nfqueue_target, ARRAY_SIZE(xt_nfqueue_target)); 71 xt_unregister_targets(xt_nfqueue_target, ARRAY_SIZE(xt_nfqueue_target));
72} 72}
73 73
74module_init(xt_nfqueue_init); 74module_init(xt_nfqueue_init);
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 92a5726ef237..a8f03057dbde 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -147,7 +147,7 @@ static int __init xt_connmark_init(void)
147 147
148static void __exit xt_connmark_fini(void) 148static void __exit xt_connmark_fini(void)
149{ 149{
150 xt_register_matches(xt_connmark_match, ARRAY_SIZE(xt_connmark_match)); 150 xt_unregister_matches(xt_connmark_match, ARRAY_SIZE(xt_connmark_match));
151} 151}
152 152
153module_init(xt_connmark_init); 153module_init(xt_connmark_init);
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 4125a55f469f..a6ce1d6d5c59 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -32,6 +32,7 @@
32#include <linux/socket.h> 32#include <linux/socket.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/audit.h>
35#include <net/sock.h> 36#include <net/sock.h>
36#include <net/netlink.h> 37#include <net/netlink.h>
37#include <net/genetlink.h> 38#include <net/genetlink.h>
@@ -162,8 +163,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
162 int nla_a_rem; 163 int nla_a_rem;
163 int nla_b_rem; 164 int nla_b_rem;
164 165
165 if (!info->attrs[NLBL_CIPSOV4_A_DOI] || 166 if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] ||
166 !info->attrs[NLBL_CIPSOV4_A_TAGLST] ||
167 !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST]) 167 !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST])
168 return -EINVAL; 168 return -EINVAL;
169 169
@@ -344,8 +344,7 @@ static int netlbl_cipsov4_add_pass(struct genl_info *info)
344 int ret_val; 344 int ret_val;
345 struct cipso_v4_doi *doi_def = NULL; 345 struct cipso_v4_doi *doi_def = NULL;
346 346
347 if (!info->attrs[NLBL_CIPSOV4_A_DOI] || 347 if (!info->attrs[NLBL_CIPSOV4_A_TAGLST])
348 !info->attrs[NLBL_CIPSOV4_A_TAGLST])
349 return -EINVAL; 348 return -EINVAL;
350 349
351 doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); 350 doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL);
@@ -381,21 +380,40 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
381 380
382{ 381{
383 int ret_val = -EINVAL; 382 int ret_val = -EINVAL;
384 u32 map_type; 383 u32 type;
384 u32 doi;
385 const char *type_str = "(unknown)";
386 struct audit_buffer *audit_buf;
387 struct netlbl_audit audit_info;
385 388
386 if (!info->attrs[NLBL_CIPSOV4_A_MTYPE]) 389 if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
390 !info->attrs[NLBL_CIPSOV4_A_MTYPE])
387 return -EINVAL; 391 return -EINVAL;
388 392
389 map_type = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE]); 393 doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
390 switch (map_type) { 394 netlbl_netlink_auditinfo(skb, &audit_info);
395
396 type = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE]);
397 switch (type) {
391 case CIPSO_V4_MAP_STD: 398 case CIPSO_V4_MAP_STD:
399 type_str = "std";
392 ret_val = netlbl_cipsov4_add_std(info); 400 ret_val = netlbl_cipsov4_add_std(info);
393 break; 401 break;
394 case CIPSO_V4_MAP_PASS: 402 case CIPSO_V4_MAP_PASS:
403 type_str = "pass";
395 ret_val = netlbl_cipsov4_add_pass(info); 404 ret_val = netlbl_cipsov4_add_pass(info);
396 break; 405 break;
397 } 406 }
398 407
408 audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
409 &audit_info);
410 audit_log_format(audit_buf,
411 " cipso_doi=%u cipso_type=%s res=%u",
412 doi,
413 type_str,
414 ret_val == 0 ? 1 : 0);
415 audit_log_end(audit_buf);
416
399 return ret_val; 417 return ret_val;
400} 418}
401 419
@@ -653,12 +671,27 @@ static int netlbl_cipsov4_listall(struct sk_buff *skb,
653static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) 671static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
654{ 672{
655 int ret_val = -EINVAL; 673 int ret_val = -EINVAL;
656 u32 doi; 674 u32 doi = 0;
675 struct audit_buffer *audit_buf;
676 struct netlbl_audit audit_info;
657 677
658 if (info->attrs[NLBL_CIPSOV4_A_DOI]) { 678 if (!info->attrs[NLBL_CIPSOV4_A_DOI])
659 doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); 679 return -EINVAL;
660 ret_val = cipso_v4_doi_remove(doi, netlbl_cipsov4_doi_free); 680
661 } 681 doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
682 netlbl_netlink_auditinfo(skb, &audit_info);
683
684 ret_val = cipso_v4_doi_remove(doi,
685 &audit_info,
686 netlbl_cipsov4_doi_free);
687
688 audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL,
689 &audit_info);
690 audit_log_format(audit_buf,
691 " cipso_doi=%u res=%u",
692 doi,
693 ret_val == 0 ? 1 : 0);
694 audit_log_end(audit_buf);
662 695
663 return ret_val; 696 return ret_val;
664} 697}
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index f56d7a8ac7b7..af4371d3b459 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -35,12 +35,14 @@
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/audit.h>
38#include <net/netlabel.h> 39#include <net/netlabel.h>
39#include <net/cipso_ipv4.h> 40#include <net/cipso_ipv4.h>
40#include <asm/bug.h> 41#include <asm/bug.h>
41 42
42#include "netlabel_mgmt.h" 43#include "netlabel_mgmt.h"
43#include "netlabel_domainhash.h" 44#include "netlabel_domainhash.h"
45#include "netlabel_user.h"
44 46
45struct netlbl_domhsh_tbl { 47struct netlbl_domhsh_tbl {
46 struct list_head *tbl; 48 struct list_head *tbl;
@@ -186,6 +188,7 @@ int netlbl_domhsh_init(u32 size)
186/** 188/**
187 * netlbl_domhsh_add - Adds a entry to the domain hash table 189 * netlbl_domhsh_add - Adds a entry to the domain hash table
188 * @entry: the entry to add 190 * @entry: the entry to add
191 * @audit_info: NetLabel audit information
189 * 192 *
190 * Description: 193 * Description:
191 * Adds a new entry to the domain hash table and handles any updates to the 194 * Adds a new entry to the domain hash table and handles any updates to the
@@ -193,10 +196,13 @@ int netlbl_domhsh_init(u32 size)
193 * negative on failure. 196 * negative on failure.
194 * 197 *
195 */ 198 */
196int netlbl_domhsh_add(struct netlbl_dom_map *entry) 199int netlbl_domhsh_add(struct netlbl_dom_map *entry,
200 struct netlbl_audit *audit_info)
197{ 201{
198 int ret_val; 202 int ret_val;
199 u32 bkt; 203 u32 bkt;
204 struct audit_buffer *audit_buf;
205 char *audit_domain;
200 206
201 switch (entry->type) { 207 switch (entry->type) {
202 case NETLBL_NLTYPE_UNLABELED: 208 case NETLBL_NLTYPE_UNLABELED:
@@ -236,6 +242,26 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry)
236 spin_unlock(&netlbl_domhsh_def_lock); 242 spin_unlock(&netlbl_domhsh_def_lock);
237 } else 243 } else
238 ret_val = -EINVAL; 244 ret_val = -EINVAL;
245
246 if (entry->domain != NULL)
247 audit_domain = entry->domain;
248 else
249 audit_domain = "(default)";
250 audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
251 audit_log_format(audit_buf, " nlbl_domain=%s", audit_domain);
252 switch (entry->type) {
253 case NETLBL_NLTYPE_UNLABELED:
254 audit_log_format(audit_buf, " nlbl_protocol=unlbl");
255 break;
256 case NETLBL_NLTYPE_CIPSOV4:
257 audit_log_format(audit_buf,
258 " nlbl_protocol=cipsov4 cipso_doi=%u",
259 entry->type_def.cipsov4->doi);
260 break;
261 }
262 audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
263 audit_log_end(audit_buf);
264
239 rcu_read_unlock(); 265 rcu_read_unlock();
240 266
241 if (ret_val != 0) { 267 if (ret_val != 0) {
@@ -254,6 +280,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry)
254/** 280/**
255 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table 281 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table
256 * @entry: the entry to add 282 * @entry: the entry to add
283 * @audit_info: NetLabel audit information
257 * 284 *
258 * Description: 285 * Description:
259 * Adds a new default entry to the domain hash table and handles any updates 286 * Adds a new default entry to the domain hash table and handles any updates
@@ -261,14 +288,16 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry)
261 * negative on failure. 288 * negative on failure.
262 * 289 *
263 */ 290 */
264int netlbl_domhsh_add_default(struct netlbl_dom_map *entry) 291int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
292 struct netlbl_audit *audit_info)
265{ 293{
266 return netlbl_domhsh_add(entry); 294 return netlbl_domhsh_add(entry, audit_info);
267} 295}
268 296
269/** 297/**
270 * netlbl_domhsh_remove - Removes an entry from the domain hash table 298 * netlbl_domhsh_remove - Removes an entry from the domain hash table
271 * @domain: the domain to remove 299 * @domain: the domain to remove
300 * @audit_info: NetLabel audit information
272 * 301 *
273 * Description: 302 * Description:
274 * Removes an entry from the domain hash table and handles any updates to the 303 * Removes an entry from the domain hash table and handles any updates to the
@@ -276,10 +305,12 @@ int netlbl_domhsh_add_default(struct netlbl_dom_map *entry)
276 * negative on failure. 305 * negative on failure.
277 * 306 *
278 */ 307 */
279int netlbl_domhsh_remove(const char *domain) 308int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
280{ 309{
281 int ret_val = -ENOENT; 310 int ret_val = -ENOENT;
282 struct netlbl_dom_map *entry; 311 struct netlbl_dom_map *entry;
312 struct audit_buffer *audit_buf;
313 char *audit_domain;
283 314
284 rcu_read_lock(); 315 rcu_read_lock();
285 if (domain != NULL) 316 if (domain != NULL)
@@ -316,6 +347,18 @@ int netlbl_domhsh_remove(const char *domain)
316 ret_val = -ENOENT; 347 ret_val = -ENOENT;
317 spin_unlock(&netlbl_domhsh_def_lock); 348 spin_unlock(&netlbl_domhsh_def_lock);
318 } 349 }
350
351 if (entry->domain != NULL)
352 audit_domain = entry->domain;
353 else
354 audit_domain = "(default)";
355 audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
356 audit_log_format(audit_buf,
357 " nlbl_domain=%s res=%u",
358 audit_domain,
359 ret_val == 0 ? 1 : 0);
360 audit_log_end(audit_buf);
361
319 if (ret_val == 0) 362 if (ret_val == 0)
320 call_rcu(&entry->rcu, netlbl_domhsh_free_entry); 363 call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
321 364
@@ -326,6 +369,7 @@ remove_return:
326 369
327/** 370/**
328 * netlbl_domhsh_remove_default - Removes the default entry from the table 371 * netlbl_domhsh_remove_default - Removes the default entry from the table
372 * @audit_info: NetLabel audit information
329 * 373 *
330 * Description: 374 * Description:
331 * Removes/resets the default entry for the domain hash table and handles any 375 * Removes/resets the default entry for the domain hash table and handles any
@@ -333,9 +377,9 @@ remove_return:
333 * success, non-zero on failure. 377 * success, non-zero on failure.
334 * 378 *
335 */ 379 */
336int netlbl_domhsh_remove_default(void) 380int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)
337{ 381{
338 return netlbl_domhsh_remove(NULL); 382 return netlbl_domhsh_remove(NULL, audit_info);
339} 383}
340 384
341/** 385/**
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 02af72a7877c..3689956c3436 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -57,9 +57,11 @@ struct netlbl_dom_map {
57int netlbl_domhsh_init(u32 size); 57int netlbl_domhsh_init(u32 size);
58 58
59/* Manipulate the domain hash table */ 59/* Manipulate the domain hash table */
60int netlbl_domhsh_add(struct netlbl_dom_map *entry); 60int netlbl_domhsh_add(struct netlbl_dom_map *entry,
61int netlbl_domhsh_add_default(struct netlbl_dom_map *entry); 61 struct netlbl_audit *audit_info);
62int netlbl_domhsh_remove_default(void); 62int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
63 struct netlbl_audit *audit_info);
64int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
63struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain); 65struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
64int netlbl_domhsh_walk(u32 *skip_bkt, 66int netlbl_domhsh_walk(u32 *skip_bkt,
65 u32 *skip_chain, 67 u32 *skip_chain,
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 54fb7de3c2b1..ff971103fd0c 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -200,7 +200,7 @@ void netlbl_cache_invalidate(void)
200int netlbl_cache_add(const struct sk_buff *skb, 200int netlbl_cache_add(const struct sk_buff *skb,
201 const struct netlbl_lsm_secattr *secattr) 201 const struct netlbl_lsm_secattr *secattr)
202{ 202{
203 if (secattr->cache.data == NULL) 203 if (secattr->cache == NULL)
204 return -ENOMSG; 204 return -ENOMSG;
205 205
206 if (CIPSO_V4_OPTEXIST(skb)) 206 if (CIPSO_V4_OPTEXIST(skb))
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 8626c9f678eb..53c9079ad2c3 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -87,11 +87,14 @@ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
87 struct netlbl_dom_map *entry = NULL; 87 struct netlbl_dom_map *entry = NULL;
88 size_t tmp_size; 88 size_t tmp_size;
89 u32 tmp_val; 89 u32 tmp_val;
90 struct netlbl_audit audit_info;
90 91
91 if (!info->attrs[NLBL_MGMT_A_DOMAIN] || 92 if (!info->attrs[NLBL_MGMT_A_DOMAIN] ||
92 !info->attrs[NLBL_MGMT_A_PROTOCOL]) 93 !info->attrs[NLBL_MGMT_A_PROTOCOL])
93 goto add_failure; 94 goto add_failure;
94 95
96 netlbl_netlink_auditinfo(skb, &audit_info);
97
95 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 98 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
96 if (entry == NULL) { 99 if (entry == NULL) {
97 ret_val = -ENOMEM; 100 ret_val = -ENOMEM;
@@ -108,7 +111,7 @@ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
108 111
109 switch (entry->type) { 112 switch (entry->type) {
110 case NETLBL_NLTYPE_UNLABELED: 113 case NETLBL_NLTYPE_UNLABELED:
111 ret_val = netlbl_domhsh_add(entry); 114 ret_val = netlbl_domhsh_add(entry, &audit_info);
112 break; 115 break;
113 case NETLBL_NLTYPE_CIPSOV4: 116 case NETLBL_NLTYPE_CIPSOV4:
114 if (!info->attrs[NLBL_MGMT_A_CV4DOI]) 117 if (!info->attrs[NLBL_MGMT_A_CV4DOI])
@@ -125,7 +128,7 @@ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
125 rcu_read_unlock(); 128 rcu_read_unlock();
126 goto add_failure; 129 goto add_failure;
127 } 130 }
128 ret_val = netlbl_domhsh_add(entry); 131 ret_val = netlbl_domhsh_add(entry, &audit_info);
129 rcu_read_unlock(); 132 rcu_read_unlock();
130 break; 133 break;
131 default: 134 default:
@@ -156,12 +159,15 @@ add_failure:
156static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info) 159static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info)
157{ 160{
158 char *domain; 161 char *domain;
162 struct netlbl_audit audit_info;
159 163
160 if (!info->attrs[NLBL_MGMT_A_DOMAIN]) 164 if (!info->attrs[NLBL_MGMT_A_DOMAIN])
161 return -EINVAL; 165 return -EINVAL;
162 166
167 netlbl_netlink_auditinfo(skb, &audit_info);
168
163 domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); 169 domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]);
164 return netlbl_domhsh_remove(domain); 170 return netlbl_domhsh_remove(domain, &audit_info);
165} 171}
166 172
167/** 173/**
@@ -264,10 +270,13 @@ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
264 int ret_val = -EINVAL; 270 int ret_val = -EINVAL;
265 struct netlbl_dom_map *entry = NULL; 271 struct netlbl_dom_map *entry = NULL;
266 u32 tmp_val; 272 u32 tmp_val;
273 struct netlbl_audit audit_info;
267 274
268 if (!info->attrs[NLBL_MGMT_A_PROTOCOL]) 275 if (!info->attrs[NLBL_MGMT_A_PROTOCOL])
269 goto adddef_failure; 276 goto adddef_failure;
270 277
278 netlbl_netlink_auditinfo(skb, &audit_info);
279
271 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 280 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
272 if (entry == NULL) { 281 if (entry == NULL) {
273 ret_val = -ENOMEM; 282 ret_val = -ENOMEM;
@@ -277,7 +286,7 @@ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
277 286
278 switch (entry->type) { 287 switch (entry->type) {
279 case NETLBL_NLTYPE_UNLABELED: 288 case NETLBL_NLTYPE_UNLABELED:
280 ret_val = netlbl_domhsh_add_default(entry); 289 ret_val = netlbl_domhsh_add_default(entry, &audit_info);
281 break; 290 break;
282 case NETLBL_NLTYPE_CIPSOV4: 291 case NETLBL_NLTYPE_CIPSOV4:
283 if (!info->attrs[NLBL_MGMT_A_CV4DOI]) 292 if (!info->attrs[NLBL_MGMT_A_CV4DOI])
@@ -294,7 +303,7 @@ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
294 rcu_read_unlock(); 303 rcu_read_unlock();
295 goto adddef_failure; 304 goto adddef_failure;
296 } 305 }
297 ret_val = netlbl_domhsh_add_default(entry); 306 ret_val = netlbl_domhsh_add_default(entry, &audit_info);
298 rcu_read_unlock(); 307 rcu_read_unlock();
299 break; 308 break;
300 default: 309 default:
@@ -322,7 +331,11 @@ adddef_failure:
322 */ 331 */
323static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info) 332static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info)
324{ 333{
325 return netlbl_domhsh_remove_default(); 334 struct netlbl_audit audit_info;
335
336 netlbl_netlink_auditinfo(skb, &audit_info);
337
338 return netlbl_domhsh_remove_default(&audit_info);
326} 339}
327 340
328/** 341/**
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 440f5c4e1e2d..1833ad233b39 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -64,6 +64,34 @@ static struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1] = {
64}; 64};
65 65
66/* 66/*
67 * Helper Functions
68 */
69
70/**
71 * netlbl_unlabel_acceptflg_set - Set the unlabeled accept flag
72 * @value: desired value
73 * @audit_info: NetLabel audit information
74 *
75 * Description:
76 * Set the value of the unlabeled accept flag to @value.
77 *
78 */
79static void netlbl_unlabel_acceptflg_set(u8 value,
80 struct netlbl_audit *audit_info)
81{
82 struct audit_buffer *audit_buf;
83 u8 old_val;
84
85 old_val = atomic_read(&netlabel_unlabel_accept_flg);
86 atomic_set(&netlabel_unlabel_accept_flg, value);
87
88 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
89 audit_info);
90 audit_log_format(audit_buf, " unlbl_accept=%u old=%u", value, old_val);
91 audit_log_end(audit_buf);
92}
93
94/*
67 * NetLabel Command Handlers 95 * NetLabel Command Handlers
68 */ 96 */
69 97
@@ -79,18 +107,19 @@ static struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1] = {
79 */ 107 */
80static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info) 108static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info)
81{ 109{
82 int ret_val = -EINVAL;
83 u8 value; 110 u8 value;
111 struct netlbl_audit audit_info;
84 112
85 if (info->attrs[NLBL_UNLABEL_A_ACPTFLG]) { 113 if (info->attrs[NLBL_UNLABEL_A_ACPTFLG]) {
86 value = nla_get_u8(info->attrs[NLBL_UNLABEL_A_ACPTFLG]); 114 value = nla_get_u8(info->attrs[NLBL_UNLABEL_A_ACPTFLG]);
87 if (value == 1 || value == 0) { 115 if (value == 1 || value == 0) {
88 atomic_set(&netlabel_unlabel_accept_flg, value); 116 netlbl_netlink_auditinfo(skb, &audit_info);
89 ret_val = 0; 117 netlbl_unlabel_acceptflg_set(value, &audit_info);
118 return 0;
90 } 119 }
91 } 120 }
92 121
93 return ret_val; 122 return -EINVAL;
94} 123}
95 124
96/** 125/**
@@ -229,16 +258,23 @@ int netlbl_unlabel_defconf(void)
229{ 258{
230 int ret_val; 259 int ret_val;
231 struct netlbl_dom_map *entry; 260 struct netlbl_dom_map *entry;
261 struct netlbl_audit audit_info;
262
263 /* Only the kernel is allowed to call this function and the only time
264 * it is called is at bootup before the audit subsystem is reporting
              265  * messages so don't worry too much about these values. */
266 security_task_getsecid(current, &audit_info.secid);
267 audit_info.loginuid = 0;
232 268
233 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 269 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
234 if (entry == NULL) 270 if (entry == NULL)
235 return -ENOMEM; 271 return -ENOMEM;
236 entry->type = NETLBL_NLTYPE_UNLABELED; 272 entry->type = NETLBL_NLTYPE_UNLABELED;
237 ret_val = netlbl_domhsh_add_default(entry); 273 ret_val = netlbl_domhsh_add_default(entry, &audit_info);
238 if (ret_val != 0) 274 if (ret_val != 0)
239 return ret_val; 275 return ret_val;
240 276
241 atomic_set(&netlabel_unlabel_accept_flg, 1); 277 netlbl_unlabel_acceptflg_set(1, &audit_info);
242 278
243 return 0; 279 return 0;
244} 280}
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index eeb7d768d2bb..98a416381e61 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -32,6 +32,9 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/socket.h> 34#include <linux/socket.h>
35#include <linux/audit.h>
36#include <linux/tty.h>
37#include <linux/security.h>
35#include <net/sock.h> 38#include <net/sock.h>
36#include <net/netlink.h> 39#include <net/netlink.h>
37#include <net/genetlink.h> 40#include <net/genetlink.h>
@@ -74,3 +77,41 @@ int netlbl_netlink_init(void)
74 77
75 return 0; 78 return 0;
76} 79}
80
81/*
82 * NetLabel Audit Functions
83 */
84
85/**
86 * netlbl_audit_start_common - Start an audit message
87 * @type: audit message type
88 * @audit_info: NetLabel audit information
89 *
90 * Description:
91 * Start an audit message using the type specified in @type and fill the audit
92 * message with some fields common to all NetLabel audit messages. Returns
93 * a pointer to the audit buffer on success, NULL on failure.
94 *
95 */
96struct audit_buffer *netlbl_audit_start_common(int type,
97 struct netlbl_audit *audit_info)
98{
99 struct audit_context *audit_ctx = current->audit_context;
100 struct audit_buffer *audit_buf;
101 char *secctx;
102 u32 secctx_len;
103
104 audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type);
105 if (audit_buf == NULL)
106 return NULL;
107
108 audit_log_format(audit_buf, "netlabel: auid=%u", audit_info->loginuid);
109
110 if (audit_info->secid != 0 &&
111 security_secid_to_secctx(audit_info->secid,
112 &secctx,
113 &secctx_len) == 0)
114 audit_log_format(audit_buf, " subj=%s", secctx);
115
116 return audit_buf;
117}
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 3f9386b917df..47967ef32964 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -34,6 +34,7 @@
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <linux/capability.h> 36#include <linux/capability.h>
37#include <linux/audit.h>
37#include <net/netlink.h> 38#include <net/netlink.h>
38#include <net/genetlink.h> 39#include <net/genetlink.h>
39#include <net/netlabel.h> 40#include <net/netlabel.h>
@@ -71,8 +72,25 @@ static inline void *netlbl_netlink_hdr_put(struct sk_buff *skb,
71 NETLBL_PROTO_VERSION); 72 NETLBL_PROTO_VERSION);
72} 73}
73 74
75/**
76 * netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg
77 * @skb: the packet
78 * @audit_info: NetLabel audit information
79 */
80static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
81 struct netlbl_audit *audit_info)
82{
83 audit_info->secid = NETLINK_CB(skb).sid;
84 audit_info->loginuid = NETLINK_CB(skb).loginuid;
85}
86
74/* NetLabel NETLINK I/O functions */ 87/* NetLabel NETLINK I/O functions */
75 88
76int netlbl_netlink_init(void); 89int netlbl_netlink_init(void);
77 90
91/* NetLabel Audit Functions */
92
93struct audit_buffer *netlbl_audit_start_common(int type,
94 struct netlbl_audit *audit_info);
95
78#endif 96#endif
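
Taken together, the additions to netlabel_user.c and netlabel_user.h give every NetLabel generic netlink handler the same two-step audit pattern: copy the requester's secid and loginuid out of NETLINK_CB(skb), then emit a record through the common starter. A minimal sketch of that pattern follows; the handler name is hypothetical and the message type simply reuses AUDIT_MAC_MAP_DEL from the domain hash hunk above, so treat it as an illustration of the pattern rather than code from this patch.

#include <linux/audit.h>
#include <linux/skbuff.h>
#include <net/genetlink.h>
#include "netlabel_user.h"

/* Hypothetical handler showing the audit pattern used throughout the patch. */
static int example_handler(struct sk_buff *skb, struct genl_info *info)
{
	struct netlbl_audit audit_info;
	struct audit_buffer *audit_buf;

	/* 1. capture who issued the request (secid + loginuid) */
	netlbl_netlink_auditinfo(skb, &audit_info);

	/* ... perform the requested configuration change here ... */

	/* 2. record the outcome behind the common "netlabel: auid=..." prefix */
	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, &audit_info);
	audit_log_format(audit_buf, " res=%u", 1);
	audit_log_end(audit_buf);
	return 0;
}

Both audit_log_format() and audit_log_end() bail out early on a NULL buffer, which is why the call sites in this patch do not check the return value of netlbl_audit_start_common().
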
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 465efc86fccf..94b2e2fe6fdb 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -381,11 +381,10 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
381 381
382 /* allocate a new message record */ 382 /* allocate a new message record */
383 ret = -ENOMEM; 383 ret = -ENOMEM;
384 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 384 msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL);
385 if (!msg) 385 if (!msg)
386 goto error; 386 goto error;
387 387
388 memcpy(msg, jumbomsg, sizeof(*msg));
389 list_add_tail(&msg->link, msgq); 388 list_add_tail(&msg->link, msgq);
390 389
391 /* adjust the jumbo packet */ 390 /* adjust the jumbo packet */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7e14f14058e9..37a184021647 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -401,7 +401,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
401 if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) 401 if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
402 return skb->len; 402 return skb->len;
403 403
404 read_lock_bh(&qdisc_tree_lock); 404 read_lock(&qdisc_tree_lock);
405 if (!tcm->tcm_parent) 405 if (!tcm->tcm_parent)
406 q = dev->qdisc_sleeping; 406 q = dev->qdisc_sleeping;
407 else 407 else
@@ -458,7 +458,7 @@ errout:
458 if (cl) 458 if (cl)
459 cops->put(q, cl); 459 cops->put(q, cl);
460out: 460out:
461 read_unlock_bh(&qdisc_tree_lock); 461 read_unlock(&qdisc_tree_lock);
462 dev_put(dev); 462 dev_put(dev);
463 return skb->len; 463 return skb->len;
464} 464}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 86cac49a0531..09fda68c8b39 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -194,7 +194,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
194 if (handle) 194 if (handle)
195 f->handle = handle; 195 f->handle = handle;
196 else { 196 else {
197 int i = 0x80000000; 197 unsigned int i = 0x80000000;
198 do { 198 do {
199 if (++head->hgenerator == 0x7FFFFFFF) 199 if (++head->hgenerator == 0x7FFFFFFF)
200 head->hgenerator = 1; 200 head->hgenerator = 1;
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
deleted file mode 100644
index 0ebc98e9be2d..000000000000
--- a/net/sched/estimator.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * net/sched/estimator.c Simple rate estimator.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
12#include <asm/uaccess.h>
13#include <asm/system.h>
14#include <linux/bitops.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/jiffies.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h>
28#include <linux/rtnetlink.h>
29#include <linux/init.h>
30#include <net/sock.h>
31#include <net/pkt_sched.h>
32
33/*
34 This code is NOT intended to be used for statistics collection,
35 its purpose is to provide a base for statistical multiplexing
36 for controlled load service.
37 If you need only statistics, run a user level daemon which
38 periodically reads byte counters.
39
40 Unfortunately, rate estimation is not a very easy task.
41 F.e. I did not find a simple way to estimate the current peak rate
42 and even failed to formulate the problem 8)8)
43
 44   So I preferred not to build an estimator into the scheduler,
45 but run this task separately.
46 Ideally, it should be kernel thread(s), but for now it runs
47 from timers, which puts apparent top bounds on the number of rated
48 flows, has minimal overhead on small, but is enough
49 to handle controlled load service, sets of aggregates.
50
51 We measure rate over A=(1<<interval) seconds and evaluate EWMA:
52
53 avrate = avrate*(1-W) + rate*W
54
55 where W is chosen as negative power of 2: W = 2^(-ewma_log)
56
57 The resulting time constant is:
58
59 T = A/(-ln(1-W))
60
61
62 NOTES.
63
64 * The stored value for avbps is scaled by 2^5, so that maximal
65 rate is ~1Gbit, avpps is scaled by 2^10.
66
67 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
68 for HZ=100 and HZ=1024 8)), maximal interval
69 is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
70 are too expensive, longer ones can be implemented
71 at user level painlessly.
72 */
73
74#define EST_MAX_INTERVAL 5
75
76struct qdisc_estimator
77{
78 struct qdisc_estimator *next;
79 struct tc_stats *stats;
80 spinlock_t *stats_lock;
81 unsigned interval;
82 int ewma_log;
83 u64 last_bytes;
84 u32 last_packets;
85 u32 avpps;
86 u32 avbps;
87};
88
89struct qdisc_estimator_head
90{
91 struct timer_list timer;
92 struct qdisc_estimator *list;
93};
94
95static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
96
97/* Estimator array lock */
98static DEFINE_RWLOCK(est_lock);
99
100static void est_timer(unsigned long arg)
101{
102 int idx = (int)arg;
103 struct qdisc_estimator *e;
104
105 read_lock(&est_lock);
106 for (e = elist[idx].list; e; e = e->next) {
107 struct tc_stats *st = e->stats;
108 u64 nbytes;
109 u32 npackets;
110 u32 rate;
111
112 spin_lock(e->stats_lock);
113 nbytes = st->bytes;
114 npackets = st->packets;
115 rate = (nbytes - e->last_bytes)<<(7 - idx);
116 e->last_bytes = nbytes;
117 e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
118 st->bps = (e->avbps+0xF)>>5;
119
120 rate = (npackets - e->last_packets)<<(12 - idx);
121 e->last_packets = npackets;
122 e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
123 e->stats->pps = (e->avpps+0x1FF)>>10;
124 spin_unlock(e->stats_lock);
125 }
126
127 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
128 read_unlock(&est_lock);
129}
130
131int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct rtattr *opt)
132{
133 struct qdisc_estimator *est;
134 struct tc_estimator *parm = RTA_DATA(opt);
135
136 if (RTA_PAYLOAD(opt) < sizeof(*parm))
137 return -EINVAL;
138
139 if (parm->interval < -2 || parm->interval > 3)
140 return -EINVAL;
141
142 est = kzalloc(sizeof(*est), GFP_KERNEL);
143 if (est == NULL)
144 return -ENOBUFS;
145
146 est->interval = parm->interval + 2;
147 est->stats = stats;
148 est->stats_lock = stats_lock;
149 est->ewma_log = parm->ewma_log;
150 est->last_bytes = stats->bytes;
151 est->avbps = stats->bps<<5;
152 est->last_packets = stats->packets;
153 est->avpps = stats->pps<<10;
154
155 est->next = elist[est->interval].list;
156 if (est->next == NULL) {
157 init_timer(&elist[est->interval].timer);
158 elist[est->interval].timer.data = est->interval;
159 elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
160 elist[est->interval].timer.function = est_timer;
161 add_timer(&elist[est->interval].timer);
162 }
163 write_lock_bh(&est_lock);
164 elist[est->interval].list = est;
165 write_unlock_bh(&est_lock);
166 return 0;
167}
168
169void qdisc_kill_estimator(struct tc_stats *stats)
170{
171 int idx;
172 struct qdisc_estimator *est, **pest;
173
174 for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
175 int killed = 0;
176 pest = &elist[idx].list;
177 while ((est=*pest) != NULL) {
178 if (est->stats != stats) {
179 pest = &est->next;
180 continue;
181 }
182
183 write_lock_bh(&est_lock);
184 *pest = est->next;
185 write_unlock_bh(&est_lock);
186
187 kfree(est);
188 killed++;
189 }
190 if (killed && elist[idx].list == NULL)
191 del_timer(&elist[idx].timer);
192 }
193}
194
195EXPORT_SYMBOL(qdisc_kill_estimator);
196EXPORT_SYMBOL(qdisc_new_estimator);
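
The comment block at the top of the deleted file is the only place this estimator's math is spelled out, so it is worth restating: with W = 2^(-ewma_log) the estimator computes avrate = avrate*(1-W) + rate*W once per interval, and est_timer() does exactly that in fixed point with a single shift. Below is a standalone sketch of one update step, assuming the same u32/long types and the 2^5 scaling of avbps described in the comment.

#include <linux/types.h>

/* One EWMA step as performed by the removed est_timer():
 *   avbps = avbps*(1 - W) + rate*W,  with  W = 2^(-ewma_log)
 * avbps stays scaled by 2^5, so the reported rate is (avbps + 0xF) >> 5. */
static u32 ewma_step(u32 avbps, u32 rate, int ewma_log)
{
	avbps += ((long)rate - (long)avbps) >> ewma_log;
	return avbps;
}

Equivalent arithmetic is provided by the generic estimator interface; note that gen_kill_estimator() is what the reworked qdisc teardown in the sch_generic.c hunks below calls instead.
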
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a19eff12cf78..0b6489291140 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -195,14 +195,14 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
195{ 195{
196 struct Qdisc *q; 196 struct Qdisc *q;
197 197
198 read_lock_bh(&qdisc_tree_lock); 198 read_lock(&qdisc_tree_lock);
199 list_for_each_entry(q, &dev->qdisc_list, list) { 199 list_for_each_entry(q, &dev->qdisc_list, list) {
200 if (q->handle == handle) { 200 if (q->handle == handle) {
201 read_unlock_bh(&qdisc_tree_lock); 201 read_unlock(&qdisc_tree_lock);
202 return q; 202 return q;
203 } 203 }
204 } 204 }
205 read_unlock_bh(&qdisc_tree_lock); 205 read_unlock(&qdisc_tree_lock);
206 return NULL; 206 return NULL;
207} 207}
208 208
@@ -837,7 +837,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
837 continue; 837 continue;
838 if (idx > s_idx) 838 if (idx > s_idx)
839 s_q_idx = 0; 839 s_q_idx = 0;
840 read_lock_bh(&qdisc_tree_lock); 840 read_lock(&qdisc_tree_lock);
841 q_idx = 0; 841 q_idx = 0;
842 list_for_each_entry(q, &dev->qdisc_list, list) { 842 list_for_each_entry(q, &dev->qdisc_list, list) {
843 if (q_idx < s_q_idx) { 843 if (q_idx < s_q_idx) {
@@ -846,12 +846,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
846 } 846 }
847 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, 847 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
848 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) { 848 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
849 read_unlock_bh(&qdisc_tree_lock); 849 read_unlock(&qdisc_tree_lock);
850 goto done; 850 goto done;
851 } 851 }
852 q_idx++; 852 q_idx++;
853 } 853 }
854 read_unlock_bh(&qdisc_tree_lock); 854 read_unlock(&qdisc_tree_lock);
855 } 855 }
856 856
857done: 857done:
@@ -1074,7 +1074,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1074 s_t = cb->args[0]; 1074 s_t = cb->args[0];
1075 t = 0; 1075 t = 0;
1076 1076
1077 read_lock_bh(&qdisc_tree_lock); 1077 read_lock(&qdisc_tree_lock);
1078 list_for_each_entry(q, &dev->qdisc_list, list) { 1078 list_for_each_entry(q, &dev->qdisc_list, list) {
1079 if (t < s_t || !q->ops->cl_ops || 1079 if (t < s_t || !q->ops->cl_ops ||
1080 (tcm->tcm_parent && 1080 (tcm->tcm_parent &&
@@ -1096,7 +1096,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1096 break; 1096 break;
1097 t++; 1097 t++;
1098 } 1098 }
1099 read_unlock_bh(&qdisc_tree_lock); 1099 read_unlock(&qdisc_tree_lock);
1100 1100
1101 cb->args[0] = t; 1101 cb->args[0] = t;
1102 1102
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6f9151899795..88c6a99ce53c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -45,11 +45,10 @@
45 The idea is the following: 45 The idea is the following:
46 - enqueue, dequeue are serialized via top level device 46 - enqueue, dequeue are serialized via top level device
47 spinlock dev->queue_lock. 47 spinlock dev->queue_lock.
48 - tree walking is protected by read_lock_bh(qdisc_tree_lock) 48 - tree walking is protected by read_lock(qdisc_tree_lock)
49 and this lock is used only in process context. 49 and this lock is used only in process context.
50 - updates to tree are made under rtnl semaphore or 50 - updates to tree are made only under rtnl semaphore,
 51	from softirq context (__qdisc_destroy rcu-callback)	 51	  hence this lock may be taken without local bh disabling.
52 hence this lock needs local bh disabling.
53 52
54 qdisc_tree_lock must be grabbed BEFORE dev->queue_lock! 53 qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
55 */ 54 */
@@ -57,14 +56,14 @@ DEFINE_RWLOCK(qdisc_tree_lock);
57 56
58void qdisc_lock_tree(struct net_device *dev) 57void qdisc_lock_tree(struct net_device *dev)
59{ 58{
60 write_lock_bh(&qdisc_tree_lock); 59 write_lock(&qdisc_tree_lock);
61 spin_lock_bh(&dev->queue_lock); 60 spin_lock_bh(&dev->queue_lock);
62} 61}
63 62
64void qdisc_unlock_tree(struct net_device *dev) 63void qdisc_unlock_tree(struct net_device *dev)
65{ 64{
66 spin_unlock_bh(&dev->queue_lock); 65 spin_unlock_bh(&dev->queue_lock);
67 write_unlock_bh(&qdisc_tree_lock); 66 write_unlock(&qdisc_tree_lock);
68} 67}
69 68
70/* 69/*
@@ -483,20 +482,6 @@ void qdisc_reset(struct Qdisc *qdisc)
483static void __qdisc_destroy(struct rcu_head *head) 482static void __qdisc_destroy(struct rcu_head *head)
484{ 483{
485 struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); 484 struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
486 struct Qdisc_ops *ops = qdisc->ops;
487
488#ifdef CONFIG_NET_ESTIMATOR
489 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
490#endif
491 write_lock(&qdisc_tree_lock);
492 if (ops->reset)
493 ops->reset(qdisc);
494 if (ops->destroy)
495 ops->destroy(qdisc);
496 write_unlock(&qdisc_tree_lock);
497 module_put(ops->owner);
498
499 dev_put(qdisc->dev);
500 kfree((char *) qdisc - qdisc->padded); 485 kfree((char *) qdisc - qdisc->padded);
501} 486}
502 487
@@ -504,32 +489,23 @@ static void __qdisc_destroy(struct rcu_head *head)
504 489
505void qdisc_destroy(struct Qdisc *qdisc) 490void qdisc_destroy(struct Qdisc *qdisc)
506{ 491{
507 struct list_head cql = LIST_HEAD_INIT(cql); 492 struct Qdisc_ops *ops = qdisc->ops;
508 struct Qdisc *cq, *q, *n;
509 493
510 if (qdisc->flags & TCQ_F_BUILTIN || 494 if (qdisc->flags & TCQ_F_BUILTIN ||
511 !atomic_dec_and_test(&qdisc->refcnt)) 495 !atomic_dec_and_test(&qdisc->refcnt))
512 return; 496 return;
513 497
514 if (!list_empty(&qdisc->list)) { 498 list_del(&qdisc->list);
515 if (qdisc->ops->cl_ops == NULL) 499#ifdef CONFIG_NET_ESTIMATOR
516 list_del(&qdisc->list); 500 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
517 else 501#endif
518 list_move(&qdisc->list, &cql); 502 if (ops->reset)
519 } 503 ops->reset(qdisc);
520 504 if (ops->destroy)
521 /* unlink inner qdiscs from dev->qdisc_list immediately */ 505 ops->destroy(qdisc);
522 list_for_each_entry(cq, &cql, list)
523 list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
524 if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
525 if (q->ops->cl_ops == NULL)
526 list_del_init(&q->list);
527 else
528 list_move_tail(&q->list, &cql);
529 }
530 list_for_each_entry_safe(cq, n, &cql, list)
531 list_del_init(&cq->list);
532 506
507 module_put(ops->owner);
508 dev_put(qdisc->dev);
533 call_rcu(&qdisc->q_rcu, __qdisc_destroy); 509 call_rcu(&qdisc->q_rcu, __qdisc_destroy);
534} 510}
535 511
@@ -549,15 +525,15 @@ void dev_activate(struct net_device *dev)
549 printk(KERN_INFO "%s: activation failed\n", dev->name); 525 printk(KERN_INFO "%s: activation failed\n", dev->name);
550 return; 526 return;
551 } 527 }
552 write_lock_bh(&qdisc_tree_lock); 528 write_lock(&qdisc_tree_lock);
553 list_add_tail(&qdisc->list, &dev->qdisc_list); 529 list_add_tail(&qdisc->list, &dev->qdisc_list);
554 write_unlock_bh(&qdisc_tree_lock); 530 write_unlock(&qdisc_tree_lock);
555 } else { 531 } else {
556 qdisc = &noqueue_qdisc; 532 qdisc = &noqueue_qdisc;
557 } 533 }
558 write_lock_bh(&qdisc_tree_lock); 534 write_lock(&qdisc_tree_lock);
559 dev->qdisc_sleeping = qdisc; 535 dev->qdisc_sleeping = qdisc;
560 write_unlock_bh(&qdisc_tree_lock); 536 write_unlock(&qdisc_tree_lock);
561 } 537 }
562 538
563 if (!netif_carrier_ok(dev)) 539 if (!netif_carrier_ok(dev))
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index bb3ddd4784b1..9b9c555c713f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -786,11 +786,10 @@ static long htb_do_events(struct htb_sched *q, int level)
786 for (i = 0; i < 500; i++) { 786 for (i = 0; i < 500; i++) {
787 struct htb_class *cl; 787 struct htb_class *cl;
788 long diff; 788 long diff;
789 struct rb_node *p = q->wait_pq[level].rb_node; 789 struct rb_node *p = rb_first(&q->wait_pq[level]);
790
790 if (!p) 791 if (!p)
791 return 0; 792 return 0;
792 while (p->rb_left)
793 p = p->rb_left;
794 793
795 cl = rb_entry(p, struct htb_class, pq_node); 794 cl = rb_entry(p, struct htb_class, pq_node);
796 if (time_after(cl->pq_key, q->jiffies)) { 795 if (time_after(cl->pq_key, q->jiffies)) {
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 03f65de75d88..64f630102532 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -218,12 +218,6 @@ int sctp_rcv(struct sk_buff *skb)
218 } 218 }
219 } 219 }
220 220
221 /* SCTP seems to always need a timestamp right now (FIXME) */
222 if (skb->tstamp.off_sec == 0) {
223 __net_timestamp(skb);
224 sock_enable_timestamp(sk);
225 }
226
227 if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) 221 if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
228 goto discard_release; 222 goto discard_release;
229 nf_reset(skb); 223 nf_reset(skb);
@@ -388,7 +382,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
388 * pmtu discovery on this transport. 382 * pmtu discovery on this transport.
389 */ 383 */
390 t->pathmtu = SCTP_DEFAULT_MINSEGMENT; 384 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
391 t->param_flags = (t->param_flags & ~SPP_HB) | 385 t->param_flags = (t->param_flags & ~SPP_PMTUD) |
392 SPP_PMTUD_DISABLE; 386 SPP_PMTUD_DISABLE;
393 } else { 387 } else {
394 t->pathmtu = pmtu; 388 t->pathmtu = pmtu;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 249e5033c1a8..78071c6e6cf1 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -215,17 +215,17 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
215 } 215 }
216 216
217 dst = ip6_route_output(NULL, &fl); 217 dst = ip6_route_output(NULL, &fl);
218 if (dst) { 218 if (!dst->error) {
219 struct rt6_info *rt; 219 struct rt6_info *rt;
220 rt = (struct rt6_info *)dst; 220 rt = (struct rt6_info *)dst;
221 SCTP_DEBUG_PRINTK( 221 SCTP_DEBUG_PRINTK(
222 "rt6_dst:" NIP6_FMT " rt6_src:" NIP6_FMT "\n", 222 "rt6_dst:" NIP6_FMT " rt6_src:" NIP6_FMT "\n",
223 NIP6(rt->rt6i_dst.addr), NIP6(rt->rt6i_src.addr)); 223 NIP6(rt->rt6i_dst.addr), NIP6(rt->rt6i_src.addr));
224 } else { 224 return dst;
225 SCTP_DEBUG_PRINTK("NO ROUTE\n");
226 } 225 }
227 226 SCTP_DEBUG_PRINTK("NO ROUTE\n");
228 return dst; 227 dst_release(dst);
228 return NULL;
229} 229}
230 230
231/* Returns the number of consecutive initial bits that match in the 2 ipv6 231/* Returns the number of consecutive initial bits that match in the 2 ipv6
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cdc5a3936766..3ef4351dd956 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -633,7 +633,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
633 * data will fit or delay in hopes of bundling a full 633 * data will fit or delay in hopes of bundling a full
634 * sized packet. 634 * sized packet.
635 */ 635 */
636 if (len < asoc->pathmtu - packet->overhead) { 636 if (len < asoc->frag_point) {
637 retval = SCTP_XMIT_NAGLE_DELAY; 637 retval = SCTP_XMIT_NAGLE_DELAY;
638 goto finish; 638 goto finish;
639 } 639 }
@@ -645,7 +645,13 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
645 /* Keep track of how many bytes are in flight to the receiver. */ 645 /* Keep track of how many bytes are in flight to the receiver. */
646 asoc->outqueue.outstanding_bytes += datasize; 646 asoc->outqueue.outstanding_bytes += datasize;
647 647
648 /* Update our view of the receiver's rwnd. */ 648 /* Update our view of the receiver's rwnd. Include sk_buff overhead
649 * while updating peer.rwnd so that it reduces the chances of a
650 * receiver running out of receive buffer space even when receive
651 * window is still open. This can happen when a sender is sending
                                                              652	  * small messages.
653 */
654 datasize += sizeof(struct sk_buff);
649 if (datasize < rwnd) 655 if (datasize < rwnd)
650 rwnd -= datasize; 656 rwnd -= datasize;
651 else 657 else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 37074a39ecbb..739582415bf6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -416,7 +416,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
416 * (Section 7.2.4)), add the data size of those 416 * (Section 7.2.4)), add the data size of those
417 * chunks to the rwnd. 417 * chunks to the rwnd.
418 */ 418 */
419 q->asoc->peer.rwnd += sctp_data_size(chunk); 419 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
420 sizeof(struct sk_buff));
420 q->outstanding_bytes -= sctp_data_size(chunk); 421 q->outstanding_bytes -= sctp_data_size(chunk);
421 transport->flight_size -= sctp_data_size(chunk); 422 transport->flight_size -= sctp_data_size(chunk);
422 423
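
Both sides of this rwnd bookkeeping now move by the same constant: net/sctp/output.c above charges sizeof(struct sk_buff) on top of the chunk data when a DATA chunk is queued, and this hunk credits the same amount back when the chunk is marked for retransmission, keeping the sender's estimate closer to what the receiver charges against its buffer through skb overhead. A compact restatement of the two updates, with hypothetical helper names (the patch open-codes both):

#include <linux/skbuff.h>
#include <net/sctp/structs.h>

/* Charge a freshly queued DATA chunk against the peer's receive window. */
static void rwnd_charge(struct sctp_association *asoc, u32 datasize)
{
	u32 charged = datasize + sizeof(struct sk_buff);

	if (charged < asoc->peer.rwnd)
		asoc->peer.rwnd -= charged;
	else
		asoc->peer.rwnd = 0;
}

/* Credit it back when the chunk is marked for retransmission. */
static void rwnd_credit(struct sctp_association *asoc, u32 datasize)
{
	asoc->peer.rwnd += datasize + sizeof(struct sk_buff);
}
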
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index a356d8d310a9..7f49e769080e 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -344,7 +344,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
344 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 344 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
345 assoc->state, hash, assoc->assoc_id, 345 assoc->state, hash, assoc->assoc_id,
346 assoc->sndbuf_used, 346 assoc->sndbuf_used,
347 (sk->sk_rcvbuf - assoc->rwnd), 347 atomic_read(&assoc->rmem_alloc),
348 sock_i_uid(sk), sock_i_ino(sk), 348 sock_i_uid(sk), sock_i_ino(sk),
349 epb->bind_addr.port, 349 epb->bind_addr.port,
350 assoc->peer.port); 350 assoc->peer.port);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7745bdea7817..507dff72c585 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1447,8 +1447,16 @@ no_hmac:
1447 /* Check to see if the cookie is stale. If there is already 1447 /* Check to see if the cookie is stale. If there is already
1448 * an association, there is no need to check cookie's expiration 1448 * an association, there is no need to check cookie's expiration
1449 * for init collision case of lost COOKIE ACK. 1449 * for init collision case of lost COOKIE ACK.
1450 * If skb has been timestamped, then use the stamp, otherwise
1451 * use current time. This introduces a small possibility that
                                                             1452	 * a cookie may be considered expired, but this would only slow
1453 * down the new association establishment instead of every packet.
1450 */ 1454 */
1451 skb_get_timestamp(skb, &tv); 1455 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
1456 skb_get_timestamp(skb, &tv);
1457 else
1458 do_gettimeofday(&tv);
1459
1452 if (!asoc && tv_lt(bear_cookie->expiration, tv)) { 1460 if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
1453 __u16 len; 1461 __u16 len;
1454 /* 1462 /*
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 79c3e072cf28..9f34dec6ff8e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -821,7 +821,7 @@ out:
821 * addrs is a pointer to an array of one or more socket addresses. Each 821 * addrs is a pointer to an array of one or more socket addresses. Each
822 * address is contained in its appropriate structure (i.e. struct 822 * address is contained in its appropriate structure (i.e. struct
823 * sockaddr_in or struct sockaddr_in6) the family of the address type 823 * sockaddr_in or struct sockaddr_in6) the family of the address type
824 * must be used to distengish the address length (note that this 824 * must be used to distinguish the address length (note that this
825 * representation is termed a "packed array" of addresses). The caller 825 * representation is termed a "packed array" of addresses). The caller
826 * specifies the number of addresses in the array with addrcnt. 826 * specifies the number of addresses in the array with addrcnt.
827 * 827 *
@@ -3084,8 +3084,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3084 */ 3084 */
3085 sp->disable_fragments = 0; 3085 sp->disable_fragments = 0;
3086 3086
3087 /* Turn on/off any Nagle-like algorithm. */ 3087 /* Enable Nagle algorithm by default. */
3088 sp->nodelay = 1; 3088 sp->nodelay = 0;
3089 3089
3090 /* Enable by default. */ 3090 /* Enable by default. */
3091 sp->v4mapped = 1; 3091 sp->v4mapped = 1;
@@ -5362,6 +5362,20 @@ static void sctp_wfree(struct sk_buff *skb)
5362 sctp_association_put(asoc); 5362 sctp_association_put(asoc);
5363} 5363}
5364 5364
5365/* Do accounting for the receive space on the socket.
5366 * Accounting for the association is done in ulpevent.c
5367 * We set this as a destructor for the cloned data skbs so that
5368 * accounting is done at the correct time.
5369 */
5370void sctp_sock_rfree(struct sk_buff *skb)
5371{
5372 struct sock *sk = skb->sk;
5373 struct sctp_ulpevent *event = sctp_skb2event(skb);
5374
5375 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
5376}
5377
5378
5365/* Helper function to wait for space in the sndbuf. */ 5379/* Helper function to wait for space in the sndbuf. */
5366static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 5380static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
5367 size_t msg_len) 5381 size_t msg_len)
@@ -5634,10 +5648,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5634 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 5648 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
5635 event = sctp_skb2event(skb); 5649 event = sctp_skb2event(skb);
5636 if (event->asoc == assoc) { 5650 if (event->asoc == assoc) {
5637 sock_rfree(skb); 5651 sctp_sock_rfree(skb);
5638 __skb_unlink(skb, &oldsk->sk_receive_queue); 5652 __skb_unlink(skb, &oldsk->sk_receive_queue);
5639 __skb_queue_tail(&newsk->sk_receive_queue, skb); 5653 __skb_queue_tail(&newsk->sk_receive_queue, skb);
5640 skb_set_owner_r(skb, newsk); 5654 sctp_skb_set_owner_r(skb, newsk);
5641 } 5655 }
5642 } 5656 }
5643 5657
@@ -5665,10 +5679,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5665 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 5679 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
5666 event = sctp_skb2event(skb); 5680 event = sctp_skb2event(skb);
5667 if (event->asoc == assoc) { 5681 if (event->asoc == assoc) {
5668 sock_rfree(skb); 5682 sctp_sock_rfree(skb);
5669 __skb_unlink(skb, &oldsp->pd_lobby); 5683 __skb_unlink(skb, &oldsp->pd_lobby);
5670 __skb_queue_tail(queue, skb); 5684 __skb_queue_tail(queue, skb);
5671 skb_set_owner_r(skb, newsk); 5685 sctp_skb_set_owner_r(skb, newsk);
5672 } 5686 }
5673 } 5687 }
5674 5688
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index ee236784a6bb..a015283a9087 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -55,10 +55,13 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
55 55
56 56
57/* Initialize an ULP event from an given skb. */ 57/* Initialize an ULP event from an given skb. */
58SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) 58SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
59 int msg_flags,
60 unsigned int len)
59{ 61{
60 memset(event, 0, sizeof(struct sctp_ulpevent)); 62 memset(event, 0, sizeof(struct sctp_ulpevent));
61 event->msg_flags = msg_flags; 63 event->msg_flags = msg_flags;
64 event->rmem_len = len;
62} 65}
63 66
64/* Create a new sctp_ulpevent. */ 67/* Create a new sctp_ulpevent. */
@@ -73,7 +76,7 @@ SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
73 goto fail; 76 goto fail;
74 77
75 event = sctp_skb2event(skb); 78 event = sctp_skb2event(skb);
76 sctp_ulpevent_init(event, msg_flags); 79 sctp_ulpevent_init(event, msg_flags, skb->truesize);
77 80
78 return event; 81 return event;
79 82
@@ -101,17 +104,16 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
101 sctp_association_hold((struct sctp_association *)asoc); 104 sctp_association_hold((struct sctp_association *)asoc);
102 skb = sctp_event2skb(event); 105 skb = sctp_event2skb(event);
103 event->asoc = (struct sctp_association *)asoc; 106 event->asoc = (struct sctp_association *)asoc;
104 atomic_add(skb->truesize, &event->asoc->rmem_alloc); 107 atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
105 skb_set_owner_r(skb, asoc->base.sk); 108 sctp_skb_set_owner_r(skb, asoc->base.sk);
106} 109}
107 110
108/* A simple destructor to give up the reference to the association. */ 111/* A simple destructor to give up the reference to the association. */
109static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) 112static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
110{ 113{
111 struct sctp_association *asoc = event->asoc; 114 struct sctp_association *asoc = event->asoc;
112 struct sk_buff *skb = sctp_event2skb(event);
113 115
114 atomic_sub(skb->truesize, &asoc->rmem_alloc); 116 atomic_sub(event->rmem_len, &asoc->rmem_alloc);
115 sctp_association_put(asoc); 117 sctp_association_put(asoc);
116} 118}
117 119
@@ -372,7 +374,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
372 374
373 /* Embed the event fields inside the cloned skb. */ 375 /* Embed the event fields inside the cloned skb. */
374 event = sctp_skb2event(skb); 376 event = sctp_skb2event(skb);
375 sctp_ulpevent_init(event, MSG_NOTIFICATION); 377 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
376 378
377 sre = (struct sctp_remote_error *) 379 sre = (struct sctp_remote_error *)
378 skb_push(skb, sizeof(struct sctp_remote_error)); 380 skb_push(skb, sizeof(struct sctp_remote_error));
@@ -464,7 +466,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
464 466
465 /* Embed the event fields inside the cloned skb. */ 467 /* Embed the event fields inside the cloned skb. */
466 event = sctp_skb2event(skb); 468 event = sctp_skb2event(skb);
467 sctp_ulpevent_init(event, MSG_NOTIFICATION); 469 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
468 470
469 ssf = (struct sctp_send_failed *) 471 ssf = (struct sctp_send_failed *)
470 skb_push(skb, sizeof(struct sctp_send_failed)); 472 skb_push(skb, sizeof(struct sctp_send_failed));
@@ -682,8 +684,11 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
682 /* Embed the event fields inside the cloned skb. */ 684 /* Embed the event fields inside the cloned skb. */
683 event = sctp_skb2event(skb); 685 event = sctp_skb2event(skb);
684 686
685 /* Initialize event with flags 0. */ 687 /* Initialize event with flags 0 and correct length
686 sctp_ulpevent_init(event, 0); 688 * Since this is a clone of the original skb, only account for
689 * the data of this chunk as other chunks will be accounted separately.
690 */
691 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
687 692
688 sctp_ulpevent_receive_data(event, asoc); 693 sctp_ulpevent_receive_data(event, asoc);
689 694
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 575e556aeb3e..e1d144275f97 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -309,7 +309,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
309 if (!new) 309 if (!new)
310 return NULL; /* try again later */ 310 return NULL; /* try again later */
311 311
312 new->sk = f_frag->sk; 312 sctp_skb_set_owner_r(new, f_frag->sk);
313 313
314 skb_shinfo(new)->frag_list = pos; 314 skb_shinfo(new)->frag_list = pos;
315 } else 315 } else
diff --git a/net/socket.c b/net/socket.c
index 1bc4167e0da8..6c9b9b326d76 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -95,10 +95,10 @@
95#include <linux/netfilter.h> 95#include <linux/netfilter.h>
96 96
97static int sock_no_open(struct inode *irrelevant, struct file *dontcare); 97static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
98static ssize_t sock_aio_read(struct kiocb *iocb, char __user *buf, 98static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
99 size_t size, loff_t pos); 99 unsigned long nr_segs, loff_t pos);
100static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *buf, 100static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
101 size_t size, loff_t pos); 101 unsigned long nr_segs, loff_t pos);
102static int sock_mmap(struct file *file, struct vm_area_struct *vma); 102static int sock_mmap(struct file *file, struct vm_area_struct *vma);
103 103
104static int sock_close(struct inode *inode, struct file *file); 104static int sock_close(struct inode *inode, struct file *file);
@@ -110,10 +110,6 @@ static long compat_sock_ioctl(struct file *file,
110 unsigned int cmd, unsigned long arg); 110 unsigned int cmd, unsigned long arg);
111#endif 111#endif
112static int sock_fasync(int fd, struct file *filp, int on); 112static int sock_fasync(int fd, struct file *filp, int on);
113static ssize_t sock_readv(struct file *file, const struct iovec *vector,
114 unsigned long count, loff_t *ppos);
115static ssize_t sock_writev(struct file *file, const struct iovec *vector,
116 unsigned long count, loff_t *ppos);
117static ssize_t sock_sendpage(struct file *file, struct page *page, 113static ssize_t sock_sendpage(struct file *file, struct page *page,
118 int offset, size_t size, loff_t *ppos, int more); 114 int offset, size_t size, loff_t *ppos, int more);
119 115
@@ -136,8 +132,6 @@ static struct file_operations socket_file_ops = {
136 .open = sock_no_open, /* special open code to disallow open via /proc */ 132 .open = sock_no_open, /* special open code to disallow open via /proc */
137 .release = sock_close, 133 .release = sock_close,
138 .fasync = sock_fasync, 134 .fasync = sock_fasync,
139 .readv = sock_readv,
140 .writev = sock_writev,
141 .sendpage = sock_sendpage, 135 .sendpage = sock_sendpage,
142 .splice_write = generic_splice_sendpage, 136 .splice_write = generic_splice_sendpage,
143}; 137};
@@ -664,7 +658,6 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
664} 658}
665 659
666static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, 660static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
667 char __user *ubuf, size_t size,
668 struct sock_iocb *siocb) 661 struct sock_iocb *siocb)
669{ 662{
670 if (!is_sync_kiocb(iocb)) { 663 if (!is_sync_kiocb(iocb)) {
@@ -675,16 +668,13 @@ static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
675 } 668 }
676 669
677 siocb->kiocb = iocb; 670 siocb->kiocb = iocb;
678 siocb->async_iov.iov_base = ubuf;
679 siocb->async_iov.iov_len = size;
680
681 iocb->private = siocb; 671 iocb->private = siocb;
682 return siocb; 672 return siocb;
683} 673}
684 674
685static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, 675static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
686 struct file *file, struct iovec *iov, 676 struct file *file, const struct iovec *iov,
687 unsigned long nr_segs) 677 unsigned long nr_segs)
688{ 678{
689 struct socket *sock = file->private_data; 679 struct socket *sock = file->private_data;
690 size_t size = 0; 680 size_t size = 0;
@@ -704,43 +694,27 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
704 return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); 694 return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
705} 695}
706 696
707static ssize_t sock_readv(struct file *file, const struct iovec *iov, 697static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
708 unsigned long nr_segs, loff_t *ppos) 698 unsigned long nr_segs, loff_t pos)
709{
710 struct kiocb iocb;
711 struct sock_iocb siocb;
712 struct msghdr msg;
713 int ret;
714
715 init_sync_kiocb(&iocb, NULL);
716 iocb.private = &siocb;
717
718 ret = do_sock_read(&msg, &iocb, file, (struct iovec *)iov, nr_segs);
719 if (-EIOCBQUEUED == ret)
720 ret = wait_on_sync_kiocb(&iocb);
721 return ret;
722}
723
724static ssize_t sock_aio_read(struct kiocb *iocb, char __user *ubuf,
725 size_t count, loff_t pos)
726{ 699{
727 struct sock_iocb siocb, *x; 700 struct sock_iocb siocb, *x;
728 701
729 if (pos != 0) 702 if (pos != 0)
730 return -ESPIPE; 703 return -ESPIPE;
731 if (count == 0) /* Match SYS5 behaviour */ 704
705 if (iocb->ki_left == 0) /* Match SYS5 behaviour */
732 return 0; 706 return 0;
733 707
734 x = alloc_sock_iocb(iocb, ubuf, count, &siocb); 708
709 x = alloc_sock_iocb(iocb, &siocb);
735 if (!x) 710 if (!x)
736 return -ENOMEM; 711 return -ENOMEM;
737 return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, 712 return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
738 &x->async_iov, 1);
739} 713}
740 714
741static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, 715static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
742 struct file *file, struct iovec *iov, 716 struct file *file, const struct iovec *iov,
743 unsigned long nr_segs) 717 unsigned long nr_segs)
744{ 718{
745 struct socket *sock = file->private_data; 719 struct socket *sock = file->private_data;
746 size_t size = 0; 720 size_t size = 0;
@@ -762,39 +736,22 @@ static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
762 return __sock_sendmsg(iocb, sock, msg, size); 736 return __sock_sendmsg(iocb, sock, msg, size);
763} 737}
764 738
765static ssize_t sock_writev(struct file *file, const struct iovec *iov, 739static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
766 unsigned long nr_segs, loff_t *ppos) 740 unsigned long nr_segs, loff_t pos)
767{
768 struct msghdr msg;
769 struct kiocb iocb;
770 struct sock_iocb siocb;
771 int ret;
772
773 init_sync_kiocb(&iocb, NULL);
774 iocb.private = &siocb;
775
776 ret = do_sock_write(&msg, &iocb, file, (struct iovec *)iov, nr_segs);
777 if (-EIOCBQUEUED == ret)
778 ret = wait_on_sync_kiocb(&iocb);
779 return ret;
780}
781
782static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
783 size_t count, loff_t pos)
784{ 741{
785 struct sock_iocb siocb, *x; 742 struct sock_iocb siocb, *x;
786 743
787 if (pos != 0) 744 if (pos != 0)
788 return -ESPIPE; 745 return -ESPIPE;
789 if (count == 0) /* Match SYS5 behaviour */ 746
747 if (iocb->ki_left == 0) /* Match SYS5 behaviour */
790 return 0; 748 return 0;
791 749
792 x = alloc_sock_iocb(iocb, (void __user *)ubuf, count, &siocb); 750 x = alloc_sock_iocb(iocb, &siocb);
793 if (!x) 751 if (!x)
794 return -ENOMEM; 752 return -ENOMEM;
795 753
796 return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, 754 return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
797 &x->async_iov, 1);
798} 755}
799 756
800/* 757/*
@@ -868,7 +825,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
868 break; 825 break;
869 case FIOGETOWN: 826 case FIOGETOWN:
870 case SIOCGPGRP: 827 case SIOCGPGRP:
871 err = put_user(sock->file->f_owner.pid, 828 err = put_user(f_getown(sock->file),
872 (int __user *)argp); 829 (int __user *)argp);
873 break; 830 break;
874 case SIOCGIFBR: 831 case SIOCGIFBR:
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 55163af3dcaf..993ff1a5d945 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -331,8 +331,8 @@ rpcauth_unbindcred(struct rpc_task *task)
331 task->tk_msg.rpc_cred = NULL; 331 task->tk_msg.rpc_cred = NULL;
332} 332}
333 333
334u32 * 334__be32 *
335rpcauth_marshcred(struct rpc_task *task, u32 *p) 335rpcauth_marshcred(struct rpc_task *task, __be32 *p)
336{ 336{
337 struct rpc_cred *cred = task->tk_msg.rpc_cred; 337 struct rpc_cred *cred = task->tk_msg.rpc_cred;
338 338
@@ -342,8 +342,8 @@ rpcauth_marshcred(struct rpc_task *task, u32 *p)
342 return cred->cr_ops->crmarshal(task, p); 342 return cred->cr_ops->crmarshal(task, p);
343} 343}
344 344
345u32 * 345__be32 *
346rpcauth_checkverf(struct rpc_task *task, u32 *p) 346rpcauth_checkverf(struct rpc_task *task, __be32 *p)
347{ 347{
348 struct rpc_cred *cred = task->tk_msg.rpc_cred; 348 struct rpc_cred *cred = task->tk_msg.rpc_cred;
349 349
@@ -355,7 +355,7 @@ rpcauth_checkverf(struct rpc_task *task, u32 *p)
355 355
356int 356int
357rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, 357rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
358 u32 *data, void *obj) 358 __be32 *data, void *obj)
359{ 359{
360 struct rpc_cred *cred = task->tk_msg.rpc_cred; 360 struct rpc_cred *cred = task->tk_msg.rpc_cred;
361 361
@@ -369,7 +369,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
369 369
370int 370int
371rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, 371rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
372 u32 *data, void *obj) 372 __be32 *data, void *obj)
373{ 373{
374 struct rpc_cred *cred = task->tk_msg.rpc_cred; 374 struct rpc_cred *cred = task->tk_msg.rpc_cred;
375 375
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6eed3e166ba3..b36b9463f5a4 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/net/sunrpc/auth_gss.c 2 * linux/net/sunrpc/auth_gss/auth_gss.c
3 * 3 *
4 * RPCSEC_GSS client authentication. 4 * RPCSEC_GSS client authentication.
5 * 5 *
@@ -826,14 +826,14 @@ out:
826* Marshal credentials. 826* Marshal credentials.
827* Maybe we should keep a cached credential for performance reasons. 827* Maybe we should keep a cached credential for performance reasons.
828*/ 828*/
829static u32 * 829static __be32 *
830gss_marshal(struct rpc_task *task, u32 *p) 830gss_marshal(struct rpc_task *task, __be32 *p)
831{ 831{
832 struct rpc_cred *cred = task->tk_msg.rpc_cred; 832 struct rpc_cred *cred = task->tk_msg.rpc_cred;
833 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 833 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
834 gc_base); 834 gc_base);
835 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 835 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
836 u32 *cred_len; 836 __be32 *cred_len;
837 struct rpc_rqst *req = task->tk_rqstp; 837 struct rpc_rqst *req = task->tk_rqstp;
838 u32 maj_stat = 0; 838 u32 maj_stat = 0;
839 struct xdr_netobj mic; 839 struct xdr_netobj mic;
@@ -894,12 +894,12 @@ gss_refresh(struct rpc_task *task)
894 return 0; 894 return 0;
895} 895}
896 896
897static u32 * 897static __be32 *
898gss_validate(struct rpc_task *task, u32 *p) 898gss_validate(struct rpc_task *task, __be32 *p)
899{ 899{
900 struct rpc_cred *cred = task->tk_msg.rpc_cred; 900 struct rpc_cred *cred = task->tk_msg.rpc_cred;
901 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 901 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
902 u32 seq; 902 __be32 seq;
903 struct kvec iov; 903 struct kvec iov;
904 struct xdr_buf verf_buf; 904 struct xdr_buf verf_buf;
905 struct xdr_netobj mic; 905 struct xdr_netobj mic;
@@ -940,13 +940,14 @@ out_bad:
940 940
941static inline int 941static inline int
942gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 942gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
943 kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) 943 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
944{ 944{
945 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 945 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
946 struct xdr_buf integ_buf; 946 struct xdr_buf integ_buf;
947 u32 *integ_len = NULL; 947 __be32 *integ_len = NULL;
948 struct xdr_netobj mic; 948 struct xdr_netobj mic;
949 u32 offset, *q; 949 u32 offset;
950 __be32 *q;
950 struct kvec *iov; 951 struct kvec *iov;
951 u32 maj_stat = 0; 952 u32 maj_stat = 0;
952 int status = -EIO; 953 int status = -EIO;
@@ -1032,13 +1033,13 @@ out:
1032 1033
1033static inline int 1034static inline int
1034gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1035gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1035 kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) 1036 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
1036{ 1037{
1037 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1038 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1038 u32 offset; 1039 u32 offset;
1039 u32 maj_stat; 1040 u32 maj_stat;
1040 int status; 1041 int status;
1041 u32 *opaque_len; 1042 __be32 *opaque_len;
1042 struct page **inpages; 1043 struct page **inpages;
1043 int first; 1044 int first;
1044 int pad; 1045 int pad;
@@ -1095,7 +1096,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1095 1096
1096static int 1097static int
1097gss_wrap_req(struct rpc_task *task, 1098gss_wrap_req(struct rpc_task *task,
1098 kxdrproc_t encode, void *rqstp, u32 *p, void *obj) 1099 kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
1099{ 1100{
1100 struct rpc_cred *cred = task->tk_msg.rpc_cred; 1101 struct rpc_cred *cred = task->tk_msg.rpc_cred;
1101 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1102 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1132,7 +1133,7 @@ out:
1132 1133
1133static inline int 1134static inline int
1134gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1135gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1135 struct rpc_rqst *rqstp, u32 **p) 1136 struct rpc_rqst *rqstp, __be32 **p)
1136{ 1137{
1137 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1138 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
1138 struct xdr_buf integ_buf; 1139 struct xdr_buf integ_buf;
@@ -1169,7 +1170,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1169 1170
1170static inline int 1171static inline int
1171gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1172gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1172 struct rpc_rqst *rqstp, u32 **p) 1173 struct rpc_rqst *rqstp, __be32 **p)
1173{ 1174{
1174 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; 1175 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
1175 u32 offset; 1176 u32 offset;
@@ -1198,13 +1199,13 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1198 1199
1199static int 1200static int
1200gss_unwrap_resp(struct rpc_task *task, 1201gss_unwrap_resp(struct rpc_task *task,
1201 kxdrproc_t decode, void *rqstp, u32 *p, void *obj) 1202 kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
1202{ 1203{
1203 struct rpc_cred *cred = task->tk_msg.rpc_cred; 1204 struct rpc_cred *cred = task->tk_msg.rpc_cred;
1204 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1205 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1205 gc_base); 1206 gc_base);
1206 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1207 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1207 u32 *savedp = p; 1208 __be32 *savedp = p;
1208 struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; 1209 struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
1209 int savedlen = head->iov_len; 1210 int savedlen = head->iov_len;
1210 int status = -EIO; 1211 int status = -EIO;
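The hunks above continue the conversion of the client-side GSS code from plain u32 wire words to the endian-annotated __be32 type, so sparse can flag any spot where a network-order word is mixed with a host-order integer. A minimal user-space sketch of the idea; the wrapper struct and helper names below are illustrations, not the kernel's __be32 API:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Stand-in for the kernel's __be32: a distinct type for network-order words,
 * so host-order integers cannot be mixed in by accident. */
typedef struct { uint32_t raw; } wire32;

static wire32 wire32_from_cpu(uint32_t host) { return (wire32){ htonl(host) }; }
static uint32_t wire32_to_cpu(wire32 w)      { return ntohl(w.raw); }

int main(void)
{
	wire32 xid = wire32_from_cpu(0x12345678);

	/* xid.raw holds big-endian bytes; convert before using the value */
	printf("host value 0x%08x, raw wire word 0x%08x\n",
	       (unsigned int)wire32_to_cpu(xid), (unsigned int)xid.raw);
	return 0;
}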
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 2f312164d6d5..08601ee4cd73 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -115,7 +115,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
115 krb5_hdr = ptr - 2; 115 krb5_hdr = ptr - 2;
116 msg_start = krb5_hdr + 24; 116 msg_start = krb5_hdr + 24;
117 117
118 *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); 118 *(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg);
119 memset(krb5_hdr + 4, 0xff, 4); 119 memset(krb5_hdr + 4, 0xff, 4);
120 120
121 if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) 121 if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index f179415d0c38..cc45c1605f80 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -177,9 +177,9 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
177 msg_start = krb5_hdr + 24; 177 msg_start = krb5_hdr + 24;
178 /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize); 178 /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
179 179
180 *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg); 180 *(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg);
181 memset(krb5_hdr + 4, 0xff, 4); 181 memset(krb5_hdr + 4, 0xff, 4);
182 *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg); 182 *(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
183 183
184 make_confounder(msg_start, blocksize); 184 make_confounder(msg_start, blocksize);
185 185
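The two krb5 hunks apply the same treatment to the 16-bit signalg and sealalg fields of the token header, storing them through __be16 to match the htons() conversion. A small stand-alone illustration (the offsets and algorithm id are made up; memcpy is used here instead of the kernel's pointer cast to sidestep alignment concerns):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char krb5_hdr[8] = { 0 };
	uint16_t signalg = 1;				/* hypothetical algorithm id */
	uint16_t wire = htons(signalg);

	memcpy(krb5_hdr + 2, &wire, sizeof(wire));	/* bytes 2-3, network order */
	memset(krb5_hdr + 4, 0xff, 4);			/* filler, as in the hunk above */
	printf("%02x %02x\n", krb5_hdr[2], krb5_hdr[3]);
	return 0;
}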
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 94217ec9e2dd..1f0f079ffa65 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -607,7 +607,7 @@ svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
607 607
608 if (argv->iov_len < 4) 608 if (argv->iov_len < 4)
609 return -1; 609 return -1;
610 o->len = ntohl(svc_getu32(argv)); 610 o->len = svc_getnl(argv);
611 l = round_up_to_quad(o->len); 611 l = round_up_to_quad(o->len);
612 if (argv->iov_len < l) 612 if (argv->iov_len < l)
613 return -1; 613 return -1;
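svc_getnl() and svc_putnl(), introduced in the hunk above and used throughout the rest of this file and net/sunrpc/svc.c, fold the old ntohl(svc_getu32(...)) / svc_putu32(..., htonl(...)) pairs into single helpers that move one 32-bit XDR word and convert byte order in the same step. A user-space sketch of the semantics; the cursor struct is a stand-in for the kernel's struct kvec:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Stand-in for the kernel's struct kvec cursor. */
struct cursor {
	unsigned char *base;	/* next byte to read */
	size_t len;		/* bytes currently stored */
};

static uint32_t get_nl(struct cursor *c)	/* like svc_getnl(): consume one word */
{
	uint32_t wire;

	memcpy(&wire, c->base, sizeof(wire));
	c->base += sizeof(wire);
	c->len -= sizeof(wire);
	return ntohl(wire);
}

static void put_nl(struct cursor *c, uint32_t host)	/* like svc_putnl(): append one word */
{
	uint32_t wire = htonl(host);

	memcpy(c->base + c->len, &wire, sizeof(wire));
	c->len += sizeof(wire);
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	struct cursor out = { buf, 0 };
	struct cursor in = { buf, 0 };
	uint32_t flavor, len;

	put_nl(&out, 6);	/* e.g. an auth flavor */
	put_nl(&out, 12);	/* e.g. an opaque length */
	in.len = out.len;
	flavor = get_nl(&in);
	len = get_nl(&in);
	printf("%u %u\n", (unsigned int)flavor, (unsigned int)len);
	return 0;
}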
@@ -620,17 +620,17 @@ svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
620static inline int 620static inline int
621svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o) 621svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
622{ 622{
623 u32 *p; 623 u8 *p;
624 624
625 if (resv->iov_len + 4 > PAGE_SIZE) 625 if (resv->iov_len + 4 > PAGE_SIZE)
626 return -1; 626 return -1;
627 svc_putu32(resv, htonl(o->len)); 627 svc_putnl(resv, o->len);
628 p = resv->iov_base + resv->iov_len; 628 p = resv->iov_base + resv->iov_len;
629 resv->iov_len += round_up_to_quad(o->len); 629 resv->iov_len += round_up_to_quad(o->len);
630 if (resv->iov_len > PAGE_SIZE) 630 if (resv->iov_len > PAGE_SIZE)
631 return -1; 631 return -1;
632 memcpy(p, o->data, o->len); 632 memcpy(p, o->data, o->len);
633 memset((u8 *)p + o->len, 0, round_up_to_quad(o->len) - o->len); 633 memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
634 return 0; 634 return 0;
635} 635}
636 636
@@ -640,7 +640,7 @@ svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
640 */ 640 */
641static int 641static int
642gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, 642gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
643 u32 *rpcstart, struct rpc_gss_wire_cred *gc, u32 *authp) 643 __be32 *rpcstart, struct rpc_gss_wire_cred *gc, __be32 *authp)
644{ 644{
645 struct gss_ctx *ctx_id = rsci->mechctx; 645 struct gss_ctx *ctx_id = rsci->mechctx;
646 struct xdr_buf rpchdr; 646 struct xdr_buf rpchdr;
@@ -657,7 +657,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
657 *authp = rpc_autherr_badverf; 657 *authp = rpc_autherr_badverf;
658 if (argv->iov_len < 4) 658 if (argv->iov_len < 4)
659 return SVC_DENIED; 659 return SVC_DENIED;
660 flavor = ntohl(svc_getu32(argv)); 660 flavor = svc_getnl(argv);
661 if (flavor != RPC_AUTH_GSS) 661 if (flavor != RPC_AUTH_GSS)
662 return SVC_DENIED; 662 return SVC_DENIED;
663 if (svc_safe_getnetobj(argv, &checksum)) 663 if (svc_safe_getnetobj(argv, &checksum))
@@ -687,9 +687,9 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
687static int 687static int
688gss_write_null_verf(struct svc_rqst *rqstp) 688gss_write_null_verf(struct svc_rqst *rqstp)
689{ 689{
690 u32 *p; 690 __be32 *p;
691 691
692 svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL)); 692 svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL);
693 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; 693 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
694 /* don't really need to check if head->iov_len > PAGE_SIZE ... */ 694 /* don't really need to check if head->iov_len > PAGE_SIZE ... */
695 *p++ = 0; 695 *p++ = 0;
@@ -701,14 +701,14 @@ gss_write_null_verf(struct svc_rqst *rqstp)
701static int 701static int
702gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) 702gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
703{ 703{
704 u32 xdr_seq; 704 __be32 xdr_seq;
705 u32 maj_stat; 705 u32 maj_stat;
706 struct xdr_buf verf_data; 706 struct xdr_buf verf_data;
707 struct xdr_netobj mic; 707 struct xdr_netobj mic;
708 u32 *p; 708 __be32 *p;
709 struct kvec iov; 709 struct kvec iov;
710 710
711 svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS)); 711 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
712 xdr_seq = htonl(seq); 712 xdr_seq = htonl(seq);
713 713
714 iov.iov_base = &xdr_seq; 714 iov.iov_base = &xdr_seq;
@@ -782,7 +782,7 @@ EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
782static inline int 782static inline int
783read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) 783read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
784{ 784{
785 u32 raw; 785 __be32 raw;
786 int status; 786 int status;
787 787
788 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); 788 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
@@ -805,7 +805,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
805 struct xdr_netobj mic; 805 struct xdr_netobj mic;
806 struct xdr_buf integ_buf; 806 struct xdr_buf integ_buf;
807 807
808 integ_len = ntohl(svc_getu32(&buf->head[0])); 808 integ_len = svc_getnl(&buf->head[0]);
809 if (integ_len & 3) 809 if (integ_len & 3)
810 goto out; 810 goto out;
811 if (integ_len > buf->len) 811 if (integ_len > buf->len)
@@ -825,7 +825,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
825 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); 825 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
826 if (maj_stat != GSS_S_COMPLETE) 826 if (maj_stat != GSS_S_COMPLETE)
827 goto out; 827 goto out;
828 if (ntohl(svc_getu32(&buf->head[0])) != seq) 828 if (svc_getnl(&buf->head[0]) != seq)
829 goto out; 829 goto out;
830 stat = 0; 830 stat = 0;
831out: 831out:
@@ -857,7 +857,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
857 857
858 rqstp->rq_sendfile_ok = 0; 858 rqstp->rq_sendfile_ok = 0;
859 859
860 priv_len = ntohl(svc_getu32(&buf->head[0])); 860 priv_len = svc_getnl(&buf->head[0]);
861 if (rqstp->rq_deferred) { 861 if (rqstp->rq_deferred) {
862 /* Already decrypted last time through! The sequence number 862 /* Already decrypted last time through! The sequence number
863 * check at out_seq is unnecessary but harmless: */ 863 * check at out_seq is unnecessary but harmless: */
@@ -895,7 +895,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
895 if (maj_stat != GSS_S_COMPLETE) 895 if (maj_stat != GSS_S_COMPLETE)
896 return -EINVAL; 896 return -EINVAL;
897out_seq: 897out_seq:
898 if (ntohl(svc_getu32(&buf->head[0])) != seq) 898 if (svc_getnl(&buf->head[0]) != seq)
899 return -EINVAL; 899 return -EINVAL;
900 return 0; 900 return 0;
901} 901}
@@ -903,9 +903,9 @@ out_seq:
903struct gss_svc_data { 903struct gss_svc_data {
904 /* decoded gss client cred: */ 904 /* decoded gss client cred: */
905 struct rpc_gss_wire_cred clcred; 905 struct rpc_gss_wire_cred clcred;
906 /* pointer to the beginning of the procedure-specific results, 906 /* save a pointer to the beginning of the encoded verifier,
907 * which may be encrypted/checksummed in svcauth_gss_release: */ 907 * for use in encryption/checksumming in svcauth_gss_release: */
908 u32 *body_start; 908 __be32 *verf_start;
909 struct rsc *rsci; 909 struct rsc *rsci;
910}; 910};
911 911
@@ -946,7 +946,7 @@ gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
946 * response here and return SVC_COMPLETE. 946 * response here and return SVC_COMPLETE.
947 */ 947 */
948static int 948static int
949svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp) 949svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
950{ 950{
951 struct kvec *argv = &rqstp->rq_arg.head[0]; 951 struct kvec *argv = &rqstp->rq_arg.head[0];
952 struct kvec *resv = &rqstp->rq_res.head[0]; 952 struct kvec *resv = &rqstp->rq_res.head[0];
@@ -956,8 +956,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
956 struct rpc_gss_wire_cred *gc; 956 struct rpc_gss_wire_cred *gc;
957 struct rsc *rsci = NULL; 957 struct rsc *rsci = NULL;
958 struct rsi *rsip, rsikey; 958 struct rsi *rsip, rsikey;
959 u32 *rpcstart; 959 __be32 *rpcstart;
960 u32 *reject_stat = resv->iov_base + resv->iov_len; 960 __be32 *reject_stat = resv->iov_base + resv->iov_len;
961 int ret; 961 int ret;
962 962
963 dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len); 963 dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
@@ -968,7 +968,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
968 if (!svcdata) 968 if (!svcdata)
969 goto auth_err; 969 goto auth_err;
970 rqstp->rq_auth_data = svcdata; 970 rqstp->rq_auth_data = svcdata;
971 svcdata->body_start = NULL; 971 svcdata->verf_start = NULL;
972 svcdata->rsci = NULL; 972 svcdata->rsci = NULL;
973 gc = &svcdata->clcred; 973 gc = &svcdata->clcred;
974 974
@@ -985,12 +985,12 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
985 985
986 if (argv->iov_len < 5 * 4) 986 if (argv->iov_len < 5 * 4)
987 goto auth_err; 987 goto auth_err;
988 crlen = ntohl(svc_getu32(argv)); 988 crlen = svc_getnl(argv);
989 if (ntohl(svc_getu32(argv)) != RPC_GSS_VERSION) 989 if (svc_getnl(argv) != RPC_GSS_VERSION)
990 goto auth_err; 990 goto auth_err;
991 gc->gc_proc = ntohl(svc_getu32(argv)); 991 gc->gc_proc = svc_getnl(argv);
992 gc->gc_seq = ntohl(svc_getu32(argv)); 992 gc->gc_seq = svc_getnl(argv);
993 gc->gc_svc = ntohl(svc_getu32(argv)); 993 gc->gc_svc = svc_getnl(argv);
994 if (svc_safe_getnetobj(argv, &gc->gc_ctx)) 994 if (svc_safe_getnetobj(argv, &gc->gc_ctx))
995 goto auth_err; 995 goto auth_err;
996 if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4) 996 if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
@@ -1016,9 +1016,9 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
1016 case RPC_GSS_PROC_CONTINUE_INIT: 1016 case RPC_GSS_PROC_CONTINUE_INIT:
1017 if (argv->iov_len < 2 * 4) 1017 if (argv->iov_len < 2 * 4)
1018 goto auth_err; 1018 goto auth_err;
1019 if (ntohl(svc_getu32(argv)) != RPC_AUTH_NULL) 1019 if (svc_getnl(argv) != RPC_AUTH_NULL)
1020 goto auth_err; 1020 goto auth_err;
1021 if (ntohl(svc_getu32(argv)) != 0) 1021 if (svc_getnl(argv) != 0)
1022 goto auth_err; 1022 goto auth_err;
1023 break; 1023 break;
1024 case RPC_GSS_PROC_DATA: 1024 case RPC_GSS_PROC_DATA:
@@ -1076,14 +1076,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
1076 goto drop; 1076 goto drop;
1077 if (resv->iov_len + 4 > PAGE_SIZE) 1077 if (resv->iov_len + 4 > PAGE_SIZE)
1078 goto drop; 1078 goto drop;
1079 svc_putu32(resv, rpc_success); 1079 svc_putnl(resv, RPC_SUCCESS);
1080 if (svc_safe_putnetobj(resv, &rsip->out_handle)) 1080 if (svc_safe_putnetobj(resv, &rsip->out_handle))
1081 goto drop; 1081 goto drop;
1082 if (resv->iov_len + 3 * 4 > PAGE_SIZE) 1082 if (resv->iov_len + 3 * 4 > PAGE_SIZE)
1083 goto drop; 1083 goto drop;
1084 svc_putu32(resv, htonl(rsip->major_status)); 1084 svc_putnl(resv, rsip->major_status);
1085 svc_putu32(resv, htonl(rsip->minor_status)); 1085 svc_putnl(resv, rsip->minor_status);
1086 svc_putu32(resv, htonl(GSS_SEQ_WIN)); 1086 svc_putnl(resv, GSS_SEQ_WIN);
1087 if (svc_safe_putnetobj(resv, &rsip->out_token)) 1087 if (svc_safe_putnetobj(resv, &rsip->out_token))
1088 goto drop; 1088 goto drop;
1089 rqstp->rq_client = NULL; 1089 rqstp->rq_client = NULL;
@@ -1093,10 +1093,11 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
1093 set_bit(CACHE_NEGATIVE, &rsci->h.flags); 1093 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1094 if (resv->iov_len + 4 > PAGE_SIZE) 1094 if (resv->iov_len + 4 > PAGE_SIZE)
1095 goto drop; 1095 goto drop;
1096 svc_putu32(resv, rpc_success); 1096 svc_putnl(resv, RPC_SUCCESS);
1097 goto complete; 1097 goto complete;
1098 case RPC_GSS_PROC_DATA: 1098 case RPC_GSS_PROC_DATA:
1099 *authp = rpcsec_gsserr_ctxproblem; 1099 *authp = rpcsec_gsserr_ctxproblem;
1100 svcdata->verf_start = resv->iov_base + resv->iov_len;
1100 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) 1101 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1101 goto auth_err; 1102 goto auth_err;
1102 rqstp->rq_cred = rsci->cred; 1103 rqstp->rq_cred = rsci->cred;
@@ -1110,18 +1111,16 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
1110 gc->gc_seq, rsci->mechctx)) 1111 gc->gc_seq, rsci->mechctx))
1111 goto auth_err; 1112 goto auth_err;
1112 /* placeholders for length and seq. number: */ 1113 /* placeholders for length and seq. number: */
1113 svcdata->body_start = resv->iov_base + resv->iov_len; 1114 svc_putnl(resv, 0);
1114 svc_putu32(resv, 0); 1115 svc_putnl(resv, 0);
1115 svc_putu32(resv, 0);
1116 break; 1116 break;
1117 case RPC_GSS_SVC_PRIVACY: 1117 case RPC_GSS_SVC_PRIVACY:
1118 if (unwrap_priv_data(rqstp, &rqstp->rq_arg, 1118 if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
1119 gc->gc_seq, rsci->mechctx)) 1119 gc->gc_seq, rsci->mechctx))
1120 goto auth_err; 1120 goto auth_err;
1121 /* placeholders for length and seq. number: */ 1121 /* placeholders for length and seq. number: */
1122 svcdata->body_start = resv->iov_base + resv->iov_len; 1122 svc_putnl(resv, 0);
1123 svc_putu32(resv, 0); 1123 svc_putnl(resv, 0);
1124 svc_putu32(resv, 0);
1125 break; 1124 break;
1126 default: 1125 default:
1127 goto auth_err; 1126 goto auth_err;
@@ -1147,6 +1146,33 @@ out:
1147 return ret; 1146 return ret;
1148} 1147}
1149 1148
1149static __be32 *
1150svcauth_gss_prepare_to_wrap(struct xdr_buf *resbuf, struct gss_svc_data *gsd)
1151{
1152 __be32 *p;
1153 u32 verf_len;
1154
1155 p = gsd->verf_start;
1156 gsd->verf_start = NULL;
1157
1158 /* If the reply stat is nonzero, don't wrap: */
1159 if (*(p-1) != rpc_success)
1160 return NULL;
1161 /* Skip the verifier: */
1162 p += 1;
1163 verf_len = ntohl(*p++);
1164 p += XDR_QUADLEN(verf_len);
1165 /* move accept_stat to right place: */
1166 memcpy(p, p + 2, 4);
1167 /* Also don't wrap if the accept stat is nonzero: */
1168 if (*p != rpc_success) {
1169 resbuf->head[0].iov_len -= 2 * 4;
1170 return NULL;
1171 }
1172 p++;
1173 return p;
1174}
1175
1150static inline int 1176static inline int
1151svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp) 1177svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1152{ 1178{
@@ -1156,21 +1182,13 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1156 struct xdr_buf integ_buf; 1182 struct xdr_buf integ_buf;
1157 struct xdr_netobj mic; 1183 struct xdr_netobj mic;
1158 struct kvec *resv; 1184 struct kvec *resv;
1159 u32 *p; 1185 __be32 *p;
1160 int integ_offset, integ_len; 1186 int integ_offset, integ_len;
1161 int stat = -EINVAL; 1187 int stat = -EINVAL;
1162 1188
1163 p = gsd->body_start; 1189 p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
1164 gsd->body_start = NULL; 1190 if (p == NULL)
1165 /* move accept_stat to right place: */
1166 memcpy(p, p + 2, 4);
1167 /* Don't wrap in failure case: */
1168 /* Counting on not getting here if call was not even accepted! */
1169 if (*p != rpc_success) {
1170 resbuf->head[0].iov_len -= 2 * 4;
1171 goto out; 1191 goto out;
1172 }
1173 p++;
1174 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base; 1192 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
1175 integ_len = resbuf->len - integ_offset; 1193 integ_len = resbuf->len - integ_offset;
1176 BUG_ON(integ_len % 4); 1194 BUG_ON(integ_len % 4);
@@ -1191,7 +1209,6 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1191 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1209 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1192 + resbuf->head[0].iov_len; 1210 + resbuf->head[0].iov_len;
1193 resbuf->tail[0].iov_len = 0; 1211 resbuf->tail[0].iov_len = 0;
1194 rqstp->rq_restailpage = 0;
1195 resv = &resbuf->tail[0]; 1212 resv = &resbuf->tail[0];
1196 } else { 1213 } else {
1197 resv = &resbuf->tail[0]; 1214 resv = &resbuf->tail[0];
@@ -1199,7 +1216,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1199 mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; 1216 mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
1200 if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic)) 1217 if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
1201 goto out_err; 1218 goto out_err;
1202 svc_putu32(resv, htonl(mic.len)); 1219 svc_putnl(resv, mic.len);
1203 memset(mic.data + mic.len, 0, 1220 memset(mic.data + mic.len, 0,
1204 round_up_to_quad(mic.len) - mic.len); 1221 round_up_to_quad(mic.len) - mic.len);
1205 resv->iov_len += XDR_QUADLEN(mic.len) << 2; 1222 resv->iov_len += XDR_QUADLEN(mic.len) << 2;
@@ -1219,28 +1236,20 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1219 struct rpc_gss_wire_cred *gc = &gsd->clcred; 1236 struct rpc_gss_wire_cred *gc = &gsd->clcred;
1220 struct xdr_buf *resbuf = &rqstp->rq_res; 1237 struct xdr_buf *resbuf = &rqstp->rq_res;
1221 struct page **inpages = NULL; 1238 struct page **inpages = NULL;
1222 u32 *p; 1239 __be32 *p, *len;
1223 int offset, *len; 1240 int offset;
1224 int pad; 1241 int pad;
1225 1242
1226 p = gsd->body_start; 1243 p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
1227 gsd->body_start = NULL; 1244 if (p == NULL)
1228 /* move accept_stat to right place: */
1229 memcpy(p, p + 2, 4);
1230 /* Don't wrap in failure case: */
1231 /* Counting on not getting here if call was not even accepted! */
1232 if (*p != rpc_success) {
1233 resbuf->head[0].iov_len -= 2 * 4;
1234 return 0; 1245 return 0;
1235 }
1236 p++;
1237 len = p++; 1246 len = p++;
1238 offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base; 1247 offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
1239 *p++ = htonl(gc->gc_seq); 1248 *p++ = htonl(gc->gc_seq);
1240 inpages = resbuf->pages; 1249 inpages = resbuf->pages;
1241 /* XXX: Would be better to write some xdr helper functions for 1250 /* XXX: Would be better to write some xdr helper functions for
1242 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ 1251 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
1243 if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) { 1252 if (resbuf->tail[0].iov_base) {
1244 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base 1253 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
1245 + PAGE_SIZE); 1254 + PAGE_SIZE);
1246 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base); 1255 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
@@ -1258,13 +1267,12 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1258 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1267 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1259 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE; 1268 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
1260 resbuf->tail[0].iov_len = 0; 1269 resbuf->tail[0].iov_len = 0;
1261 rqstp->rq_restailpage = 0;
1262 } 1270 }
1263 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages)) 1271 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
1264 return -ENOMEM; 1272 return -ENOMEM;
1265 *len = htonl(resbuf->len - offset); 1273 *len = htonl(resbuf->len - offset);
1266 pad = 3 - ((resbuf->len - offset - 1)&3); 1274 pad = 3 - ((resbuf->len - offset - 1)&3);
1267 p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len); 1275 p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
1268 memset(p, 0, pad); 1276 memset(p, 0, pad);
1269 resbuf->tail[0].iov_len += pad; 1277 resbuf->tail[0].iov_len += pad;
1270 resbuf->len += pad; 1278 resbuf->len += pad;
@@ -1282,7 +1290,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
1282 if (gc->gc_proc != RPC_GSS_PROC_DATA) 1290 if (gc->gc_proc != RPC_GSS_PROC_DATA)
1283 goto out; 1291 goto out;
1284 /* Release can be called twice, but we only wrap once. */ 1292 /* Release can be called twice, but we only wrap once. */
1285 if (gsd->body_start == NULL) 1293 if (gsd->verf_start == NULL)
1286 goto out; 1294 goto out;
1287 /* normally not set till svc_send, but we need it here: */ 1295 /* normally not set till svc_send, but we need it here: */
1288 /* XXX: what for? Do we mess it up the moment we call svc_putu32 1296 /* XXX: what for? Do we mess it up the moment we call svc_putu32
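The refactor above replaces the saved body_start pointer with verf_start and a shared svcauth_gss_prepare_to_wrap() helper that steps over the verifier before the reply is wrapped. A user-space sketch of that pointer walk, with an invented buffer layout and a local XDR_QUADLEN carrying the usual meaning:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* length of an opaque in 4-byte words */

/* verf_start points at the verifier's flavor word; return the first word
 * after the opaque verifier body. */
static uint32_t *skip_verifier(uint32_t *verf_start)
{
	uint32_t *p = verf_start + 1;		/* step over the flavor */
	uint32_t verf_len = ntohl(*p++);	/* opaque length in bytes */

	return p + XDR_QUADLEN(verf_len);
}

int main(void)
{
	/* flavor, 8-byte verifier body, then accept_stat and one result word */
	uint32_t reply[] = {
		htonl(6), htonl(8), htonl(0xaaaaaaaa), htonl(0xbbbbbbbb),
		htonl(0), htonl(42),
	};
	uint32_t *p = skip_verifier(reply);

	printf("accept_stat=%u first_result=%u\n",
	       (unsigned int)ntohl(p[0]), (unsigned int)ntohl(p[1]));
	return 0;
}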
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 2eccffa96ba1..3be257dc32b2 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -60,8 +60,8 @@ nul_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags)
60/* 60/*
61 * Marshal credential. 61 * Marshal credential.
62 */ 62 */
63static u32 * 63static __be32 *
64nul_marshal(struct rpc_task *task, u32 *p) 64nul_marshal(struct rpc_task *task, __be32 *p)
65{ 65{
66 *p++ = htonl(RPC_AUTH_NULL); 66 *p++ = htonl(RPC_AUTH_NULL);
67 *p++ = 0; 67 *p++ = 0;
@@ -81,8 +81,8 @@ nul_refresh(struct rpc_task *task)
81 return 0; 81 return 0;
82} 82}
83 83
84static u32 * 84static __be32 *
85nul_validate(struct rpc_task *task, u32 *p) 85nul_validate(struct rpc_task *task, __be32 *p)
86{ 86{
87 rpc_authflavor_t flavor; 87 rpc_authflavor_t flavor;
88 u32 size; 88 u32 size;
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 74c7406a1054..f7f990c9afe2 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -137,12 +137,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
137 * Marshal credentials. 137 * Marshal credentials.
138 * Maybe we should keep a cached credential for performance reasons. 138 * Maybe we should keep a cached credential for performance reasons.
139 */ 139 */
140static u32 * 140static __be32 *
141unx_marshal(struct rpc_task *task, u32 *p) 141unx_marshal(struct rpc_task *task, __be32 *p)
142{ 142{
143 struct rpc_clnt *clnt = task->tk_client; 143 struct rpc_clnt *clnt = task->tk_client;
144 struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; 144 struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred;
145 u32 *base, *hold; 145 __be32 *base, *hold;
146 int i; 146 int i;
147 147
148 *p++ = htonl(RPC_AUTH_UNIX); 148 *p++ = htonl(RPC_AUTH_UNIX);
@@ -178,8 +178,8 @@ unx_refresh(struct rpc_task *task)
178 return 0; 178 return 0;
179} 179}
180 180
181static u32 * 181static __be32 *
182unx_validate(struct rpc_task *task, u32 *p) 182unx_validate(struct rpc_task *task, __be32 *p)
183{ 183{
184 rpc_authflavor_t flavor; 184 rpc_authflavor_t flavor;
185 u32 size; 185 u32 size;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 084a0ad5c64e..78696f2dc7d6 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -60,8 +60,8 @@ static void call_refreshresult(struct rpc_task *task);
60static void call_timeout(struct rpc_task *task); 60static void call_timeout(struct rpc_task *task);
61static void call_connect(struct rpc_task *task); 61static void call_connect(struct rpc_task *task);
62static void call_connect_status(struct rpc_task *task); 62static void call_connect_status(struct rpc_task *task);
63static u32 * call_header(struct rpc_task *task); 63static __be32 * call_header(struct rpc_task *task);
64static u32 * call_verify(struct rpc_task *task); 64static __be32 * call_verify(struct rpc_task *task);
65 65
66 66
67static int 67static int
@@ -161,10 +161,10 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
161 } 161 }
162 162
163 /* save the nodename */ 163 /* save the nodename */
164 clnt->cl_nodelen = strlen(system_utsname.nodename); 164 clnt->cl_nodelen = strlen(utsname()->nodename);
165 if (clnt->cl_nodelen > UNX_MAXNODENAME) 165 if (clnt->cl_nodelen > UNX_MAXNODENAME)
166 clnt->cl_nodelen = UNX_MAXNODENAME; 166 clnt->cl_nodelen = UNX_MAXNODENAME;
167 memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen); 167 memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
168 return clnt; 168 return clnt;
169 169
170out_no_auth: 170out_no_auth:
@@ -782,7 +782,7 @@ call_encode(struct rpc_task *task)
782 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 782 struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
783 unsigned int bufsiz; 783 unsigned int bufsiz;
784 kxdrproc_t encode; 784 kxdrproc_t encode;
785 u32 *p; 785 __be32 *p;
786 786
787 dprintk("RPC: %4d call_encode (status %d)\n", 787 dprintk("RPC: %4d call_encode (status %d)\n",
788 task->tk_pid, task->tk_status); 788 task->tk_pid, task->tk_status);
@@ -1100,7 +1100,7 @@ call_decode(struct rpc_task *task)
1100 struct rpc_clnt *clnt = task->tk_client; 1100 struct rpc_clnt *clnt = task->tk_client;
1101 struct rpc_rqst *req = task->tk_rqstp; 1101 struct rpc_rqst *req = task->tk_rqstp;
1102 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1102 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
1103 u32 *p; 1103 __be32 *p;
1104 1104
1105 dprintk("RPC: %4d call_decode (status %d)\n", 1105 dprintk("RPC: %4d call_decode (status %d)\n",
1106 task->tk_pid, task->tk_status); 1106 task->tk_pid, task->tk_status);
@@ -1197,12 +1197,12 @@ call_refreshresult(struct rpc_task *task)
1197/* 1197/*
1198 * Call header serialization 1198 * Call header serialization
1199 */ 1199 */
1200static u32 * 1200static __be32 *
1201call_header(struct rpc_task *task) 1201call_header(struct rpc_task *task)
1202{ 1202{
1203 struct rpc_clnt *clnt = task->tk_client; 1203 struct rpc_clnt *clnt = task->tk_client;
1204 struct rpc_rqst *req = task->tk_rqstp; 1204 struct rpc_rqst *req = task->tk_rqstp;
1205 u32 *p = req->rq_svec[0].iov_base; 1205 __be32 *p = req->rq_svec[0].iov_base;
1206 1206
1207 /* FIXME: check buffer size? */ 1207 /* FIXME: check buffer size? */
1208 1208
@@ -1221,12 +1221,13 @@ call_header(struct rpc_task *task)
1221/* 1221/*
1222 * Reply header verification 1222 * Reply header verification
1223 */ 1223 */
1224static u32 * 1224static __be32 *
1225call_verify(struct rpc_task *task) 1225call_verify(struct rpc_task *task)
1226{ 1226{
1227 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; 1227 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1228 int len = task->tk_rqstp->rq_rcv_buf.len >> 2; 1228 int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
1229 u32 *p = iov->iov_base, n; 1229 __be32 *p = iov->iov_base;
1230 u32 n;
1230 int error = -EACCES; 1231 int error = -EACCES;
1231 1232
1232 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { 1233 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
@@ -1303,7 +1304,7 @@ call_verify(struct rpc_task *task)
1303 printk(KERN_WARNING "call_verify: auth check failed\n"); 1304 printk(KERN_WARNING "call_verify: auth check failed\n");
1304 goto out_garbage; /* bad verifier, retry */ 1305 goto out_garbage; /* bad verifier, retry */
1305 } 1306 }
1306 len = p - (u32 *)iov->iov_base - 1; 1307 len = p - (__be32 *)iov->iov_base - 1;
1307 if (len < 0) 1308 if (len < 0)
1308 goto out_overflow; 1309 goto out_overflow;
1309 switch ((n = ntohl(*p++))) { 1310 switch ((n = ntohl(*p++))) {
@@ -1358,12 +1359,12 @@ out_overflow:
1358 goto out_garbage; 1359 goto out_garbage;
1359} 1360}
1360 1361
1361static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) 1362static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
1362{ 1363{
1363 return 0; 1364 return 0;
1364} 1365}
1365 1366
1366static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) 1367static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
1367{ 1368{
1368 return 0; 1369 return 0;
1369} 1370}
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index c04609d3476a..e52afab413de 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -101,11 +101,13 @@ void rpc_getport(struct rpc_task *task)
101 /* Autobind on cloned rpc clients is discouraged */ 101 /* Autobind on cloned rpc clients is discouraged */
102 BUG_ON(clnt->cl_parent != clnt); 102 BUG_ON(clnt->cl_parent != clnt);
103 103
104 if (xprt_test_and_set_binding(xprt)) { 104 /* Put self on queue before sending rpcbind request, in case
105 task->tk_status = -EACCES; /* tell caller to check again */ 105 * pmap_getport_done completes before we return from rpc_run_task */
106 rpc_sleep_on(&xprt->binding, task, NULL, NULL); 106 rpc_sleep_on(&xprt->binding, task, NULL, NULL);
107 return; 107
108 } 108 status = -EACCES; /* tell caller to check again */
109 if (xprt_test_and_set_binding(xprt))
110 goto bailout_nofree;
109 111
110 /* Someone else may have bound if we slept */ 112 /* Someone else may have bound if we slept */
111 status = 0; 113 status = 0;
@@ -134,8 +136,6 @@ void rpc_getport(struct rpc_task *task)
134 goto bailout; 136 goto bailout;
135 rpc_release_task(child); 137 rpc_release_task(child);
136 138
137 rpc_sleep_on(&xprt->binding, task, NULL, NULL);
138
139 task->tk_xprt->stat.bind_count++; 139 task->tk_xprt->stat.bind_count++;
140 return; 140 return;
141 141
@@ -300,7 +300,7 @@ static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr,
300/* 300/*
301 * XDR encode/decode functions for PMAP 301 * XDR encode/decode functions for PMAP
302 */ 302 */
303static int xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct portmap_args *map) 303static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map)
304{ 304{
305 dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", 305 dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n",
306 map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port); 306 map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
@@ -313,13 +313,13 @@ static int xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct portmap_args
313 return 0; 313 return 0;
314} 314}
315 315
316static int xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp) 316static int xdr_decode_port(struct rpc_rqst *req, __be32 *p, unsigned short *portp)
317{ 317{
318 *portp = (unsigned short) ntohl(*p++); 318 *portp = (unsigned short) ntohl(*p++);
319 return 0; 319 return 0;
320} 320}
321 321
322static int xdr_decode_bool(struct rpc_rqst *req, u32 *p, unsigned int *boolp) 322static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
323{ 323{
324 *boolp = (unsigned int) ntohl(*p++); 324 *boolp = (unsigned int) ntohl(*p++);
325 return 0; 325 return 0;
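xdr_decode_port() above pulls the port out of a full 32-bit XDR word and narrows it to 16 bits after the byte-order conversion. A stand-alone sketch (2049 is just an example value):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t wire = htonl(2049);			/* example port on the wire */
	unsigned short port = (unsigned short)ntohl(wire);

	printf("port=%u\n", (unsigned int)port);
	return 0;
}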
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index dfa504fe383f..9a0b41a97f90 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -488,14 +488,13 @@ rpc_get_inode(struct super_block *sb, int mode)
488 return NULL; 488 return NULL;
489 inode->i_mode = mode; 489 inode->i_mode = mode;
490 inode->i_uid = inode->i_gid = 0; 490 inode->i_uid = inode->i_gid = 0;
491 inode->i_blksize = PAGE_CACHE_SIZE;
492 inode->i_blocks = 0; 491 inode->i_blocks = 0;
493 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 492 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
494 switch(mode & S_IFMT) { 493 switch(mode & S_IFMT) {
495 case S_IFDIR: 494 case S_IFDIR:
496 inode->i_fop = &simple_dir_operations; 495 inode->i_fop = &simple_dir_operations;
497 inode->i_op = &simple_dir_inode_operations; 496 inode->i_op = &simple_dir_inode_operations;
498 inode->i_nlink++; 497 inc_nlink(inode);
499 default: 498 default:
500 break; 499 break;
501 } 500 }
@@ -572,7 +571,7 @@ rpc_populate(struct dentry *parent,
572 if (private) 571 if (private)
573 rpc_inode_setowner(inode, private); 572 rpc_inode_setowner(inode, private);
574 if (S_ISDIR(mode)) 573 if (S_ISDIR(mode))
575 dir->i_nlink++; 574 inc_nlink(dir);
576 d_add(dentry, inode); 575 d_add(dentry, inode);
577 } 576 }
578 mutex_unlock(&dir->i_mutex); 577 mutex_unlock(&dir->i_mutex);
@@ -594,7 +593,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
594 goto out_err; 593 goto out_err;
595 inode->i_ino = iunique(dir->i_sb, 100); 594 inode->i_ino = iunique(dir->i_sb, 100);
596 d_instantiate(dentry, inode); 595 d_instantiate(dentry, inode);
597 dir->i_nlink++; 596 inc_nlink(dir);
598 inode_dir_notify(dir, DN_CREATE); 597 inode_dir_notify(dir, DN_CREATE);
599 return 0; 598 return 0;
600out_err: 599out_err:
@@ -858,7 +857,6 @@ int register_rpc_pipefs(void)
858 857
859void unregister_rpc_pipefs(void) 858void unregister_rpc_pipefs(void)
860{ 859{
861 if (kmem_cache_destroy(rpc_inode_cachep)) 860 kmem_cache_destroy(rpc_inode_cachep);
862 printk(KERN_WARNING "RPC: unable to free inode cache\n");
863 unregister_filesystem(&rpc_pipe_fs_type); 861 unregister_filesystem(&rpc_pipe_fs_type);
864} 862}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6390461a9756..a1ab4eed41f4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1059,10 +1059,10 @@ rpc_destroy_mempool(void)
1059 mempool_destroy(rpc_buffer_mempool); 1059 mempool_destroy(rpc_buffer_mempool);
1060 if (rpc_task_mempool) 1060 if (rpc_task_mempool)
1061 mempool_destroy(rpc_task_mempool); 1061 mempool_destroy(rpc_task_mempool);
1062 if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp)) 1062 if (rpc_task_slabp)
1063 printk(KERN_INFO "rpc_task: not all structures were freed\n"); 1063 kmem_cache_destroy(rpc_task_slabp);
1064 if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp)) 1064 if (rpc_buffer_slabp)
1065 printk(KERN_INFO "rpc_buffers: not all structures were freed\n"); 1065 kmem_cache_destroy(rpc_buffer_slabp);
1066} 1066}
1067 1067
1068int 1068int
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 26c0531d7e25..192dff5dabcb 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -70,6 +70,8 @@ EXPORT_SYMBOL(put_rpccred);
70/* RPC server stuff */ 70/* RPC server stuff */
71EXPORT_SYMBOL(svc_create); 71EXPORT_SYMBOL(svc_create);
72EXPORT_SYMBOL(svc_create_thread); 72EXPORT_SYMBOL(svc_create_thread);
73EXPORT_SYMBOL(svc_create_pooled);
74EXPORT_SYMBOL(svc_set_num_threads);
73EXPORT_SYMBOL(svc_exit_thread); 75EXPORT_SYMBOL(svc_exit_thread);
74EXPORT_SYMBOL(svc_destroy); 76EXPORT_SYMBOL(svc_destroy);
75EXPORT_SYMBOL(svc_drop); 77EXPORT_SYMBOL(svc_drop);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b76a227dd3ad..eb44ec929ca1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -4,6 +4,10 @@
4 * High-level RPC service routines 4 * High-level RPC service routines
5 * 5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 *
8 * Multiple threads pools and NUMAisation
9 * Copyright (c) 2006 Silicon Graphics, Inc.
10 * by Greg Banks <gnb@melbourne.sgi.com>
7 */ 11 */
8 12
9#include <linux/linkage.h> 13#include <linux/linkage.h>
@@ -12,6 +16,8 @@
12#include <linux/net.h> 16#include <linux/net.h>
13#include <linux/in.h> 17#include <linux/in.h>
14#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
15 21
16#include <linux/sunrpc/types.h> 22#include <linux/sunrpc/types.h>
17#include <linux/sunrpc/xdr.h> 23#include <linux/sunrpc/xdr.h>
@@ -23,14 +29,252 @@
23#define RPC_PARANOIA 1 29#define RPC_PARANOIA 1
24 30
25/* 31/*
32 * Mode for mapping cpus to pools.
33 */
34enum {
35 SVC_POOL_NONE = -1, /* uninitialised, choose one of the others */
36 SVC_POOL_GLOBAL, /* no mapping, just a single global pool
37 * (legacy & UP mode) */
38 SVC_POOL_PERCPU, /* one pool per cpu */
39 SVC_POOL_PERNODE /* one pool per numa node */
40};
41
42/*
43 * Structure for mapping cpus to pools and vice versa.
44 * Setup once during sunrpc initialisation.
45 */
46static struct svc_pool_map {
47 int mode; /* Note: int not enum to avoid
48 * warnings about "enumeration value
49 * not handled in switch" */
50 unsigned int npools;
51 unsigned int *pool_to; /* maps pool id to cpu or node */
52 unsigned int *to_pool; /* maps cpu or node to pool id */
53} svc_pool_map = {
54 .mode = SVC_POOL_NONE
55};
56
57
58/*
59 * Detect best pool mapping mode heuristically,
60 * according to the machine's topology.
61 */
62static int
63svc_pool_map_choose_mode(void)
64{
65 unsigned int node;
66
67 if (num_online_nodes() > 1) {
68 /*
69 * Actually have multiple NUMA nodes,
70 * so split pools on NUMA node boundaries
71 */
72 return SVC_POOL_PERNODE;
73 }
74
75 node = any_online_node(node_online_map);
76 if (nr_cpus_node(node) > 2) {
77 /*
78 * Non-trivial SMP, or CONFIG_NUMA on
79 * non-NUMA hardware, e.g. with a generic
80 * x86_64 kernel on Xeons. In this case we
81 * want to divide the pools on cpu boundaries.
82 */
83 return SVC_POOL_PERCPU;
84 }
85
86 /* default: one global pool */
87 return SVC_POOL_GLOBAL;
88}
89
90/*
91 * Allocate the to_pool[] and pool_to[] arrays.
92 * Returns 0 on success or an errno.
93 */
94static int
95svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
96{
97 m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
98 if (!m->to_pool)
99 goto fail;
100 m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
101 if (!m->pool_to)
102 goto fail_free;
103
104 return 0;
105
106fail_free:
107 kfree(m->to_pool);
108fail:
109 return -ENOMEM;
110}
111
112/*
113 * Initialise the pool map for SVC_POOL_PERCPU mode.
114 * Returns number of pools or <0 on error.
115 */
116static int
117svc_pool_map_init_percpu(struct svc_pool_map *m)
118{
119 unsigned int maxpools = highest_possible_processor_id()+1;
120 unsigned int pidx = 0;
121 unsigned int cpu;
122 int err;
123
124 err = svc_pool_map_alloc_arrays(m, maxpools);
125 if (err)
126 return err;
127
128 for_each_online_cpu(cpu) {
129 BUG_ON(pidx > maxpools);
130 m->to_pool[cpu] = pidx;
131 m->pool_to[pidx] = cpu;
132 pidx++;
133 }
134 /* cpus brought online later all get mapped to pool0, sorry */
135
136 return pidx;
137};
138
139
140/*
141 * Initialise the pool map for SVC_POOL_PERNODE mode.
142 * Returns number of pools or <0 on error.
143 */
144static int
145svc_pool_map_init_pernode(struct svc_pool_map *m)
146{
147 unsigned int maxpools = highest_possible_node_id()+1;
148 unsigned int pidx = 0;
149 unsigned int node;
150 int err;
151
152 err = svc_pool_map_alloc_arrays(m, maxpools);
153 if (err)
154 return err;
155
156 for_each_node_with_cpus(node) {
157 /* some architectures (e.g. SN2) have cpuless nodes */
158 BUG_ON(pidx > maxpools);
159 m->to_pool[node] = pidx;
160 m->pool_to[pidx] = node;
161 pidx++;
162 }
163 /* nodes brought online later all get mapped to pool0, sorry */
164
165 return pidx;
166}
167
168
169/*
170 * Build the global map of cpus to pools and vice versa.
171 */
172static unsigned int
173svc_pool_map_init(void)
174{
175 struct svc_pool_map *m = &svc_pool_map;
176 int npools = -1;
177
178 if (m->mode != SVC_POOL_NONE)
179 return m->npools;
180
181 m->mode = svc_pool_map_choose_mode();
182
183 switch (m->mode) {
184 case SVC_POOL_PERCPU:
185 npools = svc_pool_map_init_percpu(m);
186 break;
187 case SVC_POOL_PERNODE:
188 npools = svc_pool_map_init_pernode(m);
189 break;
190 }
191
192 if (npools < 0) {
193 /* default, or memory allocation failure */
194 npools = 1;
195 m->mode = SVC_POOL_GLOBAL;
196 }
197 m->npools = npools;
198
199 return m->npools;
200}
201
202/*
203 * Set the current thread's cpus_allowed mask so that it
204 * will only run on cpus in the given pool.
205 *
206 * Returns 1 and fills in oldmask iff a cpumask was applied.
207 */
208static inline int
209svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
210{
211 struct svc_pool_map *m = &svc_pool_map;
212 unsigned int node; /* or cpu */
213
214 /*
215 * The caller checks for sv_nrpools > 1, which
216 * implies that we've been initialized and the
217 * map mode is not NONE.
218 */
219 BUG_ON(m->mode == SVC_POOL_NONE);
220
221 switch (m->mode)
222 {
223 default:
224 return 0;
225 case SVC_POOL_PERCPU:
226 node = m->pool_to[pidx];
227 *oldmask = current->cpus_allowed;
228 set_cpus_allowed(current, cpumask_of_cpu(node));
229 return 1;
230 case SVC_POOL_PERNODE:
231 node = m->pool_to[pidx];
232 *oldmask = current->cpus_allowed;
233 set_cpus_allowed(current, node_to_cpumask(node));
234 return 1;
235 }
236}
237
238/*
239 * Use the mapping mode to choose a pool for a given CPU.
240 * Used when enqueueing an incoming RPC. Always returns
241 * a non-NULL pool pointer.
242 */
243struct svc_pool *
244svc_pool_for_cpu(struct svc_serv *serv, int cpu)
245{
246 struct svc_pool_map *m = &svc_pool_map;
247 unsigned int pidx = 0;
248
249 /*
250 * SVC_POOL_NONE happens in a pure client when
251 * lockd is brought up, so silently treat it the
252 * same as SVC_POOL_GLOBAL.
253 */
254
255 switch (m->mode) {
256 case SVC_POOL_PERCPU:
257 pidx = m->to_pool[cpu];
258 break;
259 case SVC_POOL_PERNODE:
260 pidx = m->to_pool[cpu_to_node(cpu)];
261 break;
262 }
263 return &serv->sv_pools[pidx % serv->sv_nrpools];
264}
265
266
267/*
26 * Create an RPC service 268 * Create an RPC service
27 */ 269 */
28struct svc_serv * 270static struct svc_serv *
29svc_create(struct svc_program *prog, unsigned int bufsize) 271__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
272 void (*shutdown)(struct svc_serv *serv))
30{ 273{
31 struct svc_serv *serv; 274 struct svc_serv *serv;
32 int vers; 275 int vers;
33 unsigned int xdrsize; 276 unsigned int xdrsize;
277 unsigned int i;
34 278
35 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) 279 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
36 return NULL; 280 return NULL;
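The block above adds the svc_pool_map machinery: a mapping mode chosen from the machine's topology plus two arrays translating between cpu/node ids and pool ids. A minimal user-space sketch of the per-cpu variant of that mapping (the set of online CPU ids is a made-up array here):

#include <stdio.h>
#include <stdlib.h>

struct pool_map {
	unsigned int npools;
	unsigned int *pool_to;	/* pool id -> cpu (or node) */
	unsigned int *to_pool;	/* cpu (or node) -> pool id */
};

static int pool_map_init_percpu(struct pool_map *m, const unsigned int *online,
				unsigned int nonline, unsigned int maxcpus)
{
	unsigned int pidx;

	m->to_pool = calloc(maxcpus, sizeof(unsigned int));
	m->pool_to = calloc(nonline, sizeof(unsigned int));
	if (!m->to_pool || !m->pool_to) {
		free(m->to_pool);
		free(m->pool_to);
		return -1;
	}

	for (pidx = 0; pidx < nonline; pidx++) {
		m->to_pool[online[pidx]] = pidx;	/* this cpu is served by pool pidx */
		m->pool_to[pidx] = online[pidx];	/* pool pidx pins its threads here */
	}
	m->npools = nonline;
	return 0;
}

int main(void)
{
	unsigned int online[] = { 0, 1, 4, 5 };		/* made-up online cpu ids */
	struct pool_map m;

	if (pool_map_init_percpu(&m, online, 4, 8))
		return 1;
	printf("cpu 4 -> pool %u, pool 2 -> cpu %u\n", m.to_pool[4], m.pool_to[2]);
	free(m.to_pool);
	free(m.pool_to);
	return 0;
}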
@@ -38,7 +282,11 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
38 serv->sv_program = prog; 282 serv->sv_program = prog;
39 serv->sv_nrthreads = 1; 283 serv->sv_nrthreads = 1;
40 serv->sv_stats = prog->pg_stats; 284 serv->sv_stats = prog->pg_stats;
41 serv->sv_bufsz = bufsize? bufsize : 4096; 285 if (bufsize > RPCSVC_MAXPAYLOAD)
286 bufsize = RPCSVC_MAXPAYLOAD;
287 serv->sv_max_payload = bufsize? bufsize : 4096;
288 serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
289 serv->sv_shutdown = shutdown;
42 xdrsize = 0; 290 xdrsize = 0;
43 while (prog) { 291 while (prog) {
44 prog->pg_lovers = prog->pg_nvers-1; 292 prog->pg_lovers = prog->pg_nvers-1;
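This hunk caps the requested buffer size at RPCSVC_MAXPAYLOAD and derives sv_max_mesg by adding one page and rounding up to a page boundary. A quick stand-alone check of that arithmetic (PAGE_SIZE assumed to be 4096 for the example):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int payload = 32 * 1024 + 100;	/* hypothetical bufsize */
	unsigned int max_mesg = ROUND_UP(payload + PAGE_SIZE, PAGE_SIZE);

	printf("payload=%u max_mesg=%u (%u pages)\n",
	       payload, max_mesg, max_mesg / PAGE_SIZE);
	return 0;
}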
@@ -53,20 +301,68 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
53 prog = prog->pg_next; 301 prog = prog->pg_next;
54 } 302 }
55 serv->sv_xdrsize = xdrsize; 303 serv->sv_xdrsize = xdrsize;
56 INIT_LIST_HEAD(&serv->sv_threads);
57 INIT_LIST_HEAD(&serv->sv_sockets);
58 INIT_LIST_HEAD(&serv->sv_tempsocks); 304 INIT_LIST_HEAD(&serv->sv_tempsocks);
59 INIT_LIST_HEAD(&serv->sv_permsocks); 305 INIT_LIST_HEAD(&serv->sv_permsocks);
306 init_timer(&serv->sv_temptimer);
60 spin_lock_init(&serv->sv_lock); 307 spin_lock_init(&serv->sv_lock);
61 308
309 serv->sv_nrpools = npools;
310 serv->sv_pools =
311 kcalloc(sizeof(struct svc_pool), serv->sv_nrpools,
312 GFP_KERNEL);
313 if (!serv->sv_pools) {
314 kfree(serv);
315 return NULL;
316 }
317
318 for (i = 0; i < serv->sv_nrpools; i++) {
319 struct svc_pool *pool = &serv->sv_pools[i];
320
321 dprintk("initialising pool %u for %s\n",
322 i, serv->sv_name);
323
324 pool->sp_id = i;
325 INIT_LIST_HEAD(&pool->sp_threads);
326 INIT_LIST_HEAD(&pool->sp_sockets);
327 INIT_LIST_HEAD(&pool->sp_all_threads);
328 spin_lock_init(&pool->sp_lock);
329 }
330
331
62 /* Remove any stale portmap registrations */ 332 /* Remove any stale portmap registrations */
63 svc_register(serv, 0, 0); 333 svc_register(serv, 0, 0);
64 334
65 return serv; 335 return serv;
66} 336}
67 337
338struct svc_serv *
339svc_create(struct svc_program *prog, unsigned int bufsize,
340 void (*shutdown)(struct svc_serv *serv))
341{
342 return __svc_create(prog, bufsize, /*npools*/1, shutdown);
343}
344
345struct svc_serv *
346svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
347 void (*shutdown)(struct svc_serv *serv),
348 svc_thread_fn func, int sig, struct module *mod)
349{
350 struct svc_serv *serv;
351 unsigned int npools = svc_pool_map_init();
352
353 serv = __svc_create(prog, bufsize, npools, shutdown);
354
355 if (serv != NULL) {
356 serv->sv_function = func;
357 serv->sv_kill_signal = sig;
358 serv->sv_module = mod;
359 }
360
361 return serv;
362}
363
68/* 364/*
69 * Destroy an RPC service 365 * Destroy an RPC service. Should be called with the BKL held
70 */ 366 */
71void 367void
72svc_destroy(struct svc_serv *serv) 368svc_destroy(struct svc_serv *serv)
@@ -85,12 +381,17 @@ svc_destroy(struct svc_serv *serv)
85 } else 381 } else
86 printk("svc_destroy: no threads for serv=%p!\n", serv); 382 printk("svc_destroy: no threads for serv=%p!\n", serv);
87 383
384 del_timer_sync(&serv->sv_temptimer);
385
88 while (!list_empty(&serv->sv_tempsocks)) { 386 while (!list_empty(&serv->sv_tempsocks)) {
89 svsk = list_entry(serv->sv_tempsocks.next, 387 svsk = list_entry(serv->sv_tempsocks.next,
90 struct svc_sock, 388 struct svc_sock,
91 sk_list); 389 sk_list);
92 svc_delete_socket(svsk); 390 svc_delete_socket(svsk);
93 } 391 }
392 if (serv->sv_shutdown)
393 serv->sv_shutdown(serv);
394
94 while (!list_empty(&serv->sv_permsocks)) { 395 while (!list_empty(&serv->sv_permsocks)) {
95 svsk = list_entry(serv->sv_permsocks.next, 396 svsk = list_entry(serv->sv_permsocks.next,
96 struct svc_sock, 397 struct svc_sock,
@@ -102,6 +403,7 @@ svc_destroy(struct svc_serv *serv)
102 403
103 /* Unregister service with the portmapper */ 404 /* Unregister service with the portmapper */
104 svc_register(serv, 0, 0); 405 svc_register(serv, 0, 0);
406 kfree(serv->sv_pools);
105 kfree(serv); 407 kfree(serv);
106} 408}
107 409
@@ -115,21 +417,18 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
115 int pages; 417 int pages;
116 int arghi; 418 int arghi;
117 419
118 if (size > RPCSVC_MAXPAYLOAD) 420 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
119 size = RPCSVC_MAXPAYLOAD; 421 * We assume one is at most one page
120 pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE; 422 */
121 rqstp->rq_argused = 0;
122 rqstp->rq_resused = 0;
123 arghi = 0; 423 arghi = 0;
124 BUG_ON(pages > RPCSVC_MAXPAGES); 424 BUG_ON(pages > RPCSVC_MAXPAGES);
125 while (pages) { 425 while (pages) {
126 struct page *p = alloc_page(GFP_KERNEL); 426 struct page *p = alloc_page(GFP_KERNEL);
127 if (!p) 427 if (!p)
128 break; 428 break;
129 rqstp->rq_argpages[arghi++] = p; 429 rqstp->rq_pages[arghi++] = p;
130 pages--; 430 pages--;
131 } 431 }
132 rqstp->rq_arghi = arghi;
133 return ! pages; 432 return ! pages;
134} 433}
135 434
@@ -139,24 +438,25 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
139static void 438static void
140svc_release_buffer(struct svc_rqst *rqstp) 439svc_release_buffer(struct svc_rqst *rqstp)
141{ 440{
142 while (rqstp->rq_arghi) 441 int i;
143 put_page(rqstp->rq_argpages[--rqstp->rq_arghi]); 442 for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
144 while (rqstp->rq_resused) { 443 if (rqstp->rq_pages[i])
145 if (rqstp->rq_respages[--rqstp->rq_resused] == NULL) 444 put_page(rqstp->rq_pages[i]);
146 continue;
147 put_page(rqstp->rq_respages[rqstp->rq_resused]);
148 }
149 rqstp->rq_argused = 0;
150} 445}
151 446
152/* 447/*
153 * Create a server thread 448 * Create a thread in the given pool. Caller must hold BKL.
449 * On a NUMA or SMP machine, with a multi-pool serv, the thread
450 * will be restricted to run on the cpus belonging to the pool.
154 */ 451 */
155int 452static int
156svc_create_thread(svc_thread_fn func, struct svc_serv *serv) 453__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
454 struct svc_pool *pool)
157{ 455{
158 struct svc_rqst *rqstp; 456 struct svc_rqst *rqstp;
159 int error = -ENOMEM; 457 int error = -ENOMEM;
458 int have_oldmask = 0;
459 cpumask_t oldmask;
160 460
161 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 461 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
162 if (!rqstp) 462 if (!rqstp)
@@ -166,12 +466,25 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
166 466
167 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 467 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
168 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 468 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
169 || !svc_init_buffer(rqstp, serv->sv_bufsz)) 469 || !svc_init_buffer(rqstp, serv->sv_max_mesg))
170 goto out_thread; 470 goto out_thread;
171 471
172 serv->sv_nrthreads++; 472 serv->sv_nrthreads++;
473 spin_lock_bh(&pool->sp_lock);
474 pool->sp_nrthreads++;
475 list_add(&rqstp->rq_all, &pool->sp_all_threads);
476 spin_unlock_bh(&pool->sp_lock);
173 rqstp->rq_server = serv; 477 rqstp->rq_server = serv;
478 rqstp->rq_pool = pool;
479
480 if (serv->sv_nrpools > 1)
481 have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
482
174 error = kernel_thread((int (*)(void *)) func, rqstp, 0); 483 error = kernel_thread((int (*)(void *)) func, rqstp, 0);
484
485 if (have_oldmask)
486 set_cpus_allowed(current, oldmask);
487
175 if (error < 0) 488 if (error < 0)
176 goto out_thread; 489 goto out_thread;
177 svc_sock_update_bufs(serv); 490 svc_sock_update_bufs(serv);
@@ -185,17 +498,136 @@ out_thread:
185} 498}
186 499
187/* 500/*
188 * Destroy an RPC server thread 501 * Create a thread in the default pool. Caller must hold BKL.
502 */
503int
504svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
505{
506 return __svc_create_thread(func, serv, &serv->sv_pools[0]);
507}
508
509/*
510 * Choose a pool in which to create a new thread, for svc_set_num_threads
511 */
512static inline struct svc_pool *
513choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
514{
515 if (pool != NULL)
516 return pool;
517
518 return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
519}
520
521/*
522 * Choose a thread to kill, for svc_set_num_threads
523 */
524static inline struct task_struct *
525choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
526{
527 unsigned int i;
528 struct task_struct *task = NULL;
529
530 if (pool != NULL) {
531 spin_lock_bh(&pool->sp_lock);
532 } else {
533 /* choose a pool in round-robin fashion */
534 for (i = 0; i < serv->sv_nrpools; i++) {
535 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
536 spin_lock_bh(&pool->sp_lock);
537 if (!list_empty(&pool->sp_all_threads))
538 goto found_pool;
539 spin_unlock_bh(&pool->sp_lock);
540 }
541 return NULL;
542 }
543
544found_pool:
545 if (!list_empty(&pool->sp_all_threads)) {
546 struct svc_rqst *rqstp;
547
548 /*
549 * Remove from the pool->sp_all_threads list
550 * so we don't try to kill it again.
551 */
552 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
553 list_del_init(&rqstp->rq_all);
554 task = rqstp->rq_task;
555 }
556 spin_unlock_bh(&pool->sp_lock);
557
558 return task;
559}
560
561/*
562 * Create or destroy enough new threads to make the number
563 * of threads the given number. If `pool' is non-NULL, applies
564 * only to threads in that pool, otherwise round-robins between
565 * all pools. Must be called with a svc_get() reference and
566 * the BKL held.
567 *
568 * Destroying threads relies on the service threads filling in
569 * rqstp->rq_task, which only the nfs ones do. Assumes the serv
570 * has been created using svc_create_pooled().
571 *
572 * Based on code that used to be in nfsd_svc() but tweaked
573 * to be pool-aware.
574 */
575int
576svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
577{
578 struct task_struct *victim;
579 int error = 0;
580 unsigned int state = serv->sv_nrthreads-1;
581
582 if (pool == NULL) {
583 /* The -1 assumes caller has done a svc_get() */
584 nrservs -= (serv->sv_nrthreads-1);
585 } else {
586 spin_lock_bh(&pool->sp_lock);
587 nrservs -= pool->sp_nrthreads;
588 spin_unlock_bh(&pool->sp_lock);
589 }
590
591 /* create new threads */
592 while (nrservs > 0) {
593 nrservs--;
594 __module_get(serv->sv_module);
595 error = __svc_create_thread(serv->sv_function, serv,
596 choose_pool(serv, pool, &state));
597 if (error < 0) {
598 module_put(serv->sv_module);
599 break;
600 }
601 }
602 /* destroy old threads */
603 while (nrservs < 0 &&
604 (victim = choose_victim(serv, pool, &state)) != NULL) {
605 send_sig(serv->sv_kill_signal, victim, 1);
606 nrservs++;
607 }
608
609 return error;
610}
611
612/*
613 * Called from a server thread as it's exiting. Caller must hold BKL.
189 */ 614 */
190void 615void
191svc_exit_thread(struct svc_rqst *rqstp) 616svc_exit_thread(struct svc_rqst *rqstp)
192{ 617{
193 struct svc_serv *serv = rqstp->rq_server; 618 struct svc_serv *serv = rqstp->rq_server;
619 struct svc_pool *pool = rqstp->rq_pool;
194 620
195 svc_release_buffer(rqstp); 621 svc_release_buffer(rqstp);
196 kfree(rqstp->rq_resp); 622 kfree(rqstp->rq_resp);
197 kfree(rqstp->rq_argp); 623 kfree(rqstp->rq_argp);
198 kfree(rqstp->rq_auth_data); 624 kfree(rqstp->rq_auth_data);
625
626 spin_lock_bh(&pool->sp_lock);
627 pool->sp_nrthreads--;
628 list_del(&rqstp->rq_all);
629 spin_unlock_bh(&pool->sp_lock);
630
199 kfree(rqstp); 631 kfree(rqstp);
200 632
201 /* Release the server */ 633 /* Release the server */
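svc_set_num_threads() above spreads new threads across pools with a single rolling counter when no explicit pool is given (choose_pool), and walks the pools the same way when picking victims to kill. A stand-alone sketch of just that round-robin selection; the pool type and counts are stand-ins:

#include <stdio.h>

struct pool { unsigned int id; };

/* Round-robin selection with a caller-held rolling counter, as used when no
 * explicit pool is requested. */
static struct pool *choose_pool_rr(struct pool *pools, unsigned int npools,
				   unsigned int *state)
{
	return &pools[(*state)++ % npools];
}

int main(void)
{
	struct pool pools[3] = { {0}, {1}, {2} };
	unsigned int state = 0;
	int i;

	for (i = 0; i < 5; i++)		/* five new threads */
		printf("thread %d -> pool %u\n", i,
		       choose_pool_rr(pools, 3, &state)->id);
	return 0;
}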
@@ -215,23 +647,32 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
215 unsigned long flags; 647 unsigned long flags;
216 int i, error = 0, dummy; 648 int i, error = 0, dummy;
217 649
218 progp = serv->sv_program;
219
220 dprintk("RPC: svc_register(%s, %s, %d)\n",
221 progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port);
222
223 if (!port) 650 if (!port)
224 clear_thread_flag(TIF_SIGPENDING); 651 clear_thread_flag(TIF_SIGPENDING);
225 652
226 for (i = 0; i < progp->pg_nvers; i++) { 653 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
227 if (progp->pg_vers[i] == NULL) 654 for (i = 0; i < progp->pg_nvers; i++) {
228 continue; 655 if (progp->pg_vers[i] == NULL)
229 error = rpc_register(progp->pg_prog, i, proto, port, &dummy); 656 continue;
230 if (error < 0) 657
231 break; 658 dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
232 if (port && !dummy) { 659 progp->pg_name,
233 error = -EACCES; 660 proto == IPPROTO_UDP? "udp" : "tcp",
234 break; 661 port,
662 i,
663 progp->pg_vers[i]->vs_hidden?
664 " (but not telling portmap)" : "");
665
666 if (progp->pg_vers[i]->vs_hidden)
667 continue;
668
669 error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
670 if (error < 0)
671 break;
672 if (port && !dummy) {
673 error = -EACCES;
674 break;
675 }
235 } 676 }
236 } 677 }
237 678
@@ -248,19 +689,20 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
248 * Process the RPC request. 689 * Process the RPC request.
249 */ 690 */
250int 691int
251svc_process(struct svc_serv *serv, struct svc_rqst *rqstp) 692svc_process(struct svc_rqst *rqstp)
252{ 693{
253 struct svc_program *progp; 694 struct svc_program *progp;
254 struct svc_version *versp = NULL; /* compiler food */ 695 struct svc_version *versp = NULL; /* compiler food */
255 struct svc_procedure *procp = NULL; 696 struct svc_procedure *procp = NULL;
256 struct kvec * argv = &rqstp->rq_arg.head[0]; 697 struct kvec * argv = &rqstp->rq_arg.head[0];
257 struct kvec * resv = &rqstp->rq_res.head[0]; 698 struct kvec * resv = &rqstp->rq_res.head[0];
699 struct svc_serv *serv = rqstp->rq_server;
258 kxdrproc_t xdr; 700 kxdrproc_t xdr;
259 u32 *statp; 701 __be32 *statp;
260 u32 dir, prog, vers, proc, 702 u32 dir, prog, vers, proc;
261 auth_stat, rpc_stat; 703 __be32 auth_stat, rpc_stat;
262 int auth_res; 704 int auth_res;
263 u32 *accept_statp; 705 __be32 *reply_statp;
264 706
265 rpc_stat = rpc_success; 707 rpc_stat = rpc_success;
266 708
@@ -270,10 +712,10 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
270 /* setup response xdr_buf. 712 /* setup response xdr_buf.
271 * Initially it has just one page 713 * Initially it has just one page
272 */ 714 */
273 svc_take_page(rqstp); /* must succeed */ 715 rqstp->rq_resused = 1;
274 resv->iov_base = page_address(rqstp->rq_respages[0]); 716 resv->iov_base = page_address(rqstp->rq_respages[0]);
275 resv->iov_len = 0; 717 resv->iov_len = 0;
276 rqstp->rq_res.pages = rqstp->rq_respages+1; 718 rqstp->rq_res.pages = rqstp->rq_respages + 1;
277 rqstp->rq_res.len = 0; 719 rqstp->rq_res.len = 0;
278 rqstp->rq_res.page_base = 0; 720 rqstp->rq_res.page_base = 0;
279 rqstp->rq_res.page_len = 0; 721 rqstp->rq_res.page_len = 0;
@@ -284,16 +726,16 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
284 rqstp->rq_sendfile_ok = 1; 726 rqstp->rq_sendfile_ok = 1;
285 /* tcp needs a space for the record length... */ 727 /* tcp needs a space for the record length... */
286 if (rqstp->rq_prot == IPPROTO_TCP) 728 if (rqstp->rq_prot == IPPROTO_TCP)
287 svc_putu32(resv, 0); 729 svc_putnl(resv, 0);
288 730
289 rqstp->rq_xid = svc_getu32(argv); 731 rqstp->rq_xid = svc_getu32(argv);
290 svc_putu32(resv, rqstp->rq_xid); 732 svc_putu32(resv, rqstp->rq_xid);
291 733
292 dir = ntohl(svc_getu32(argv)); 734 dir = svc_getnl(argv);
293 vers = ntohl(svc_getu32(argv)); 735 vers = svc_getnl(argv);
294 736
295 /* First words of reply: */ 737 /* First words of reply: */
296 svc_putu32(resv, xdr_one); /* REPLY */ 738 svc_putnl(resv, 1); /* REPLY */
297 739
298 if (dir != 0) /* direction != CALL */ 740 if (dir != 0) /* direction != CALL */
299 goto err_bad_dir; 741 goto err_bad_dir;
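[Editorial aside, not part of the patch: the svc_putu32()/ntohl(svc_getu32()) pairs above are being replaced by svc_putnl()/svc_getnl(), which fold the byte-order conversion into the accessor so callers deal only in host-order values. A small userspace model of the two helpers, with an invented buffer cursor type.]

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct wirebuf { uint32_t *p; };        /* crude stand-in for a kvec cursor */

static uint32_t getnl(struct wirebuf *b) { return ntohl(*b->p++); }
static void putnl(struct wirebuf *b, uint32_t v) { *b->p++ = htonl(v); }

int main(void)
{
	uint32_t words[4];
	struct wirebuf wr = { words }, rd = { words };

	putnl(&wr, 1);          /* REPLY */
	putnl(&wr, 0);          /* MSG_ACCEPTED */

	uint32_t dir = getnl(&rd);
	uint32_t stat = getnl(&rd);
	printf("dir=%u stat=%u\n", dir, stat);
	return 0;
}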
@@ -301,13 +743,13 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
301 goto err_bad_rpc; 743 goto err_bad_rpc;
302 744
303 /* Save position in case we later decide to reject: */ 745 /* Save position in case we later decide to reject: */
304 accept_statp = resv->iov_base + resv->iov_len; 746 reply_statp = resv->iov_base + resv->iov_len;
305 747
306 svc_putu32(resv, xdr_zero); /* ACCEPT */ 748 svc_putnl(resv, 0); /* ACCEPT */
307 749
308 rqstp->rq_prog = prog = ntohl(svc_getu32(argv)); /* program number */ 750 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
309 rqstp->rq_vers = vers = ntohl(svc_getu32(argv)); /* version number */ 751 rqstp->rq_vers = vers = svc_getnl(argv); /* version number */
310 rqstp->rq_proc = proc = ntohl(svc_getu32(argv)); /* procedure number */ 752 rqstp->rq_proc = proc = svc_getnl(argv); /* procedure number */
311 753
312 progp = serv->sv_program; 754 progp = serv->sv_program;
313 755
@@ -361,7 +803,7 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
361 803
362 /* Build the reply header. */ 804 /* Build the reply header. */
363 statp = resv->iov_base +resv->iov_len; 805 statp = resv->iov_base +resv->iov_len;
364 svc_putu32(resv, rpc_success); /* RPC_SUCCESS */ 806 svc_putnl(resv, RPC_SUCCESS);
365 807
366 /* Bump per-procedure stats counter */ 808 /* Bump per-procedure stats counter */
367 procp->pc_count++; 809 procp->pc_count++;
@@ -386,6 +828,11 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
386 *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); 828 *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
387 829
388 /* Encode reply */ 830 /* Encode reply */
831 if (*statp == rpc_drop_reply) {
832 if (procp->pc_release)
833 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
834 goto dropit;
835 }
389 if (*statp == rpc_success && (xdr = procp->pc_encode) 836 if (*statp == rpc_success && (xdr = procp->pc_encode)
390 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { 837 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
391 dprintk("svc: failed to encode reply\n"); 838 dprintk("svc: failed to encode reply\n");
@@ -439,26 +886,26 @@ err_bad_dir:
439 886
440err_bad_rpc: 887err_bad_rpc:
441 serv->sv_stats->rpcbadfmt++; 888 serv->sv_stats->rpcbadfmt++;
442 svc_putu32(resv, xdr_one); /* REJECT */ 889 svc_putnl(resv, 1); /* REJECT */
443 svc_putu32(resv, xdr_zero); /* RPC_MISMATCH */ 890 svc_putnl(resv, 0); /* RPC_MISMATCH */
444 svc_putu32(resv, xdr_two); /* Only RPCv2 supported */ 891 svc_putnl(resv, 2); /* Only RPCv2 supported */
445 svc_putu32(resv, xdr_two); 892 svc_putnl(resv, 2);
446 goto sendit; 893 goto sendit;
447 894
448err_bad_auth: 895err_bad_auth:
449 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat)); 896 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
450 serv->sv_stats->rpcbadauth++; 897 serv->sv_stats->rpcbadauth++;
451 /* Restore write pointer to location of accept status: */ 898 /* Restore write pointer to location of accept status: */
452 xdr_ressize_check(rqstp, accept_statp); 899 xdr_ressize_check(rqstp, reply_statp);
453 svc_putu32(resv, xdr_one); /* REJECT */ 900 svc_putnl(resv, 1); /* REJECT */
454 svc_putu32(resv, xdr_one); /* AUTH_ERROR */ 901 svc_putnl(resv, 1); /* AUTH_ERROR */
455 svc_putu32(resv, auth_stat); /* status */ 902 svc_putnl(resv, ntohl(auth_stat)); /* status */
456 goto sendit; 903 goto sendit;
457 904
458err_bad_prog: 905err_bad_prog:
459 dprintk("svc: unknown program %d\n", prog); 906 dprintk("svc: unknown program %d\n", prog);
460 serv->sv_stats->rpcbadfmt++; 907 serv->sv_stats->rpcbadfmt++;
461 svc_putu32(resv, rpc_prog_unavail); 908 svc_putnl(resv, RPC_PROG_UNAVAIL);
462 goto sendit; 909 goto sendit;
463 910
464err_bad_vers: 911err_bad_vers:
@@ -466,9 +913,9 @@ err_bad_vers:
466 printk("svc: unknown version (%d)\n", vers); 913 printk("svc: unknown version (%d)\n", vers);
467#endif 914#endif
468 serv->sv_stats->rpcbadfmt++; 915 serv->sv_stats->rpcbadfmt++;
469 svc_putu32(resv, rpc_prog_mismatch); 916 svc_putnl(resv, RPC_PROG_MISMATCH);
470 svc_putu32(resv, htonl(progp->pg_lovers)); 917 svc_putnl(resv, progp->pg_lovers);
471 svc_putu32(resv, htonl(progp->pg_hivers)); 918 svc_putnl(resv, progp->pg_hivers);
472 goto sendit; 919 goto sendit;
473 920
474err_bad_proc: 921err_bad_proc:
@@ -476,7 +923,7 @@ err_bad_proc:
476 printk("svc: unknown procedure (%d)\n", proc); 923 printk("svc: unknown procedure (%d)\n", proc);
477#endif 924#endif
478 serv->sv_stats->rpcbadfmt++; 925 serv->sv_stats->rpcbadfmt++;
479 svc_putu32(resv, rpc_proc_unavail); 926 svc_putnl(resv, RPC_PROC_UNAVAIL);
480 goto sendit; 927 goto sendit;
481 928
482err_garbage: 929err_garbage:
@@ -486,6 +933,21 @@ err_garbage:
486 rpc_stat = rpc_garbage_args; 933 rpc_stat = rpc_garbage_args;
487err_bad: 934err_bad:
488 serv->sv_stats->rpcbadfmt++; 935 serv->sv_stats->rpcbadfmt++;
489 svc_putu32(resv, rpc_stat); 936 svc_putnl(resv, ntohl(rpc_stat));
490 goto sendit; 937 goto sendit;
491} 938}
939
940/*
941 * Return (transport-specific) limit on the rpc payload.
942 */
943u32 svc_max_payload(const struct svc_rqst *rqstp)
944{
945 int max = RPCSVC_MAXPAYLOAD_TCP;
946
947 if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
948 max = RPCSVC_MAXPAYLOAD_UDP;
949 if (rqstp->rq_server->sv_max_payload < max)
950 max = rqstp->rq_server->sv_max_payload;
951 return max;
952}
953EXPORT_SYMBOL_GPL(svc_max_payload);
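[Editorial aside, not part of the patch: svc_max_payload() above takes the minimum of the transport ceiling (smaller for UDP than TCP) and the per-service cap. The sketch below uses illustrative constants, not the kernel's RPCSVC_MAXPAYLOAD values.]

#include <stdio.h>

enum { TOY_MAXPAYLOAD_UDP = 32 * 1024, TOY_MAXPAYLOAD_TCP = 1024 * 1024 };

static unsigned int max_payload(int is_udp, unsigned int service_cap)
{
	unsigned int max = is_udp ? TOY_MAXPAYLOAD_UDP : TOY_MAXPAYLOAD_TCP;

	return service_cap < max ? service_cap : max;
}

int main(void)
{
	printf("udp limit: %u\n", max_payload(1, 256 * 1024));  /* -> 32768 */
	printf("tcp limit: %u\n", max_payload(0, 256 * 1024));  /* -> 262144 */
	return 0;
}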
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 5b28c6176806..8f2320aded5c 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -35,14 +35,14 @@ static struct auth_ops *authtab[RPC_AUTH_MAXFLAVOR] = {
35}; 35};
36 36
37int 37int
38svc_authenticate(struct svc_rqst *rqstp, u32 *authp) 38svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
39{ 39{
40 rpc_authflavor_t flavor; 40 rpc_authflavor_t flavor;
41 struct auth_ops *aops; 41 struct auth_ops *aops;
42 42
43 *authp = rpc_auth_ok; 43 *authp = rpc_auth_ok;
44 44
45 flavor = ntohl(svc_getu32(&rqstp->rq_arg.head[0])); 45 flavor = svc_getnl(&rqstp->rq_arg.head[0]);
46 46
47 dprintk("svc: svc_authenticate (%d)\n", flavor); 47 dprintk("svc: svc_authenticate (%d)\n", flavor);
48 48
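[Editorial aside, not part of the patch: the u32 to __be32 changes in this and the following files let sparse catch missing byte-order conversions at build time. Plain userspace C can only illustrate the convention the annotation enforces, roughly as below.]

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef uint32_t be32;          /* "big-endian on the wire" by convention only */

static be32 to_wire(uint32_t host) { return htonl(host); }
static uint32_t from_wire(be32 wire) { return ntohl(wire); }

int main(void)
{
	be32 flavor = to_wire(1);       /* e.g. AUTH_UNIX */

	printf("host value %u, raw wire word 0x%08x\n",
	       from_wire(flavor), (unsigned int)flavor);
	return 0;
}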
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 7e5707e2d6b6..e1bd933629fe 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -9,6 +9,7 @@
9#include <linux/seq_file.h> 9#include <linux/seq_file.h>
10#include <linux/hash.h> 10#include <linux/hash.h>
11#include <linux/string.h> 11#include <linux/string.h>
12#include <net/sock.h>
12 13
13#define RPCDBG_FACILITY RPCDBG_AUTH 14#define RPCDBG_FACILITY RPCDBG_AUTH
14 15
@@ -145,7 +146,7 @@ static void ip_map_request(struct cache_detail *cd,
145{ 146{
146 char text_addr[20]; 147 char text_addr[20];
147 struct ip_map *im = container_of(h, struct ip_map, h); 148 struct ip_map *im = container_of(h, struct ip_map, h);
148 __u32 addr = im->m_addr.s_addr; 149 __be32 addr = im->m_addr.s_addr;
149 150
150 snprintf(text_addr, 20, "%u.%u.%u.%u", 151 snprintf(text_addr, 20, "%u.%u.%u.%u",
151 ntohl(addr) >> 24 & 0xff, 152 ntohl(addr) >> 24 & 0xff,
@@ -249,10 +250,10 @@ static int ip_map_show(struct seq_file *m,
249 250
250 seq_printf(m, "%s %d.%d.%d.%d %s\n", 251 seq_printf(m, "%s %d.%d.%d.%d %s\n",
251 im->m_class, 252 im->m_class,
252 htonl(addr.s_addr) >> 24 & 0xff, 253 ntohl(addr.s_addr) >> 24 & 0xff,
253 htonl(addr.s_addr) >> 16 & 0xff, 254 ntohl(addr.s_addr) >> 16 & 0xff,
254 htonl(addr.s_addr) >> 8 & 0xff, 255 ntohl(addr.s_addr) >> 8 & 0xff,
255 htonl(addr.s_addr) >> 0 & 0xff, 256 ntohl(addr.s_addr) >> 0 & 0xff,
256 dom 257 dom
257 ); 258 );
258 return 0; 259 return 0;
@@ -348,12 +349,9 @@ int auth_unix_forget_old(struct auth_domain *dom)
348 349
349struct auth_domain *auth_unix_lookup(struct in_addr addr) 350struct auth_domain *auth_unix_lookup(struct in_addr addr)
350{ 351{
351 struct ip_map key, *ipm; 352 struct ip_map *ipm;
352 struct auth_domain *rv; 353 struct auth_domain *rv;
353 354
354 strcpy(key.m_class, "nfsd");
355 key.m_addr = addr;
356
357 ipm = ip_map_lookup("nfsd", addr); 355 ipm = ip_map_lookup("nfsd", addr);
358 356
359 if (!ipm) 357 if (!ipm)
@@ -378,6 +376,44 @@ void svcauth_unix_purge(void)
378 cache_purge(&ip_map_cache); 376 cache_purge(&ip_map_cache);
379} 377}
380 378
379static inline struct ip_map *
380ip_map_cached_get(struct svc_rqst *rqstp)
381{
382 struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
383 if (ipm != NULL) {
384 if (!cache_valid(&ipm->h)) {
385 /*
386 * The entry has been invalidated since it was
387 * remembered, e.g. by a second mount from the
388 * same IP address.
389 */
390 rqstp->rq_sock->sk_info_authunix = NULL;
391 cache_put(&ipm->h, &ip_map_cache);
392 return NULL;
393 }
394 cache_get(&ipm->h);
395 }
396 return ipm;
397}
398
399static inline void
400ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
401{
402 struct svc_sock *svsk = rqstp->rq_sock;
403
404 if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
405 svsk->sk_info_authunix = ipm; /* newly cached, keep the reference */
406 else
407 cache_put(&ipm->h, &ip_map_cache);
408}
409
410void
411svcauth_unix_info_release(void *info)
412{
413 struct ip_map *ipm = info;
414 cache_put(&ipm->h, &ip_map_cache);
415}
416
381static int 417static int
382svcauth_unix_set_client(struct svc_rqst *rqstp) 418svcauth_unix_set_client(struct svc_rqst *rqstp)
383{ 419{
@@ -387,8 +423,10 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
387 if (rqstp->rq_proc == 0) 423 if (rqstp->rq_proc == 0)
388 return SVC_OK; 424 return SVC_OK;
389 425
390 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, 426 ipm = ip_map_cached_get(rqstp);
391 rqstp->rq_addr.sin_addr); 427 if (ipm == NULL)
428 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
429 rqstp->rq_addr.sin_addr);
392 430
393 if (ipm == NULL) 431 if (ipm == NULL)
394 return SVC_DENIED; 432 return SVC_DENIED;
@@ -403,14 +441,14 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
403 case 0: 441 case 0:
404 rqstp->rq_client = &ipm->m_client->h; 442 rqstp->rq_client = &ipm->m_client->h;
405 kref_get(&rqstp->rq_client->ref); 443 kref_get(&rqstp->rq_client->ref);
406 cache_put(&ipm->h, &ip_map_cache); 444 ip_map_cached_put(rqstp, ipm);
407 break; 445 break;
408 } 446 }
409 return SVC_OK; 447 return SVC_OK;
410} 448}
411 449
412static int 450static int
413svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp) 451svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
414{ 452{
415 struct kvec *argv = &rqstp->rq_arg.head[0]; 453 struct kvec *argv = &rqstp->rq_arg.head[0];
416 struct kvec *resv = &rqstp->rq_res.head[0]; 454 struct kvec *resv = &rqstp->rq_res.head[0];
@@ -427,7 +465,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
427 *authp = rpc_autherr_badcred; 465 *authp = rpc_autherr_badcred;
428 return SVC_DENIED; 466 return SVC_DENIED;
429 } 467 }
430 if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { 468 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
431 dprintk("svc: bad null verf\n"); 469 dprintk("svc: bad null verf\n");
432 *authp = rpc_autherr_badverf; 470 *authp = rpc_autherr_badverf;
433 return SVC_DENIED; 471 return SVC_DENIED;
@@ -441,8 +479,8 @@ svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
441 return SVC_DROP; /* kmalloc failure - client must retry */ 479 return SVC_DROP; /* kmalloc failure - client must retry */
442 480
443 /* Put NULL verifier */ 481 /* Put NULL verifier */
444 svc_putu32(resv, RPC_AUTH_NULL); 482 svc_putnl(resv, RPC_AUTH_NULL);
445 svc_putu32(resv, 0); 483 svc_putnl(resv, 0);
446 484
447 return SVC_OK; 485 return SVC_OK;
448} 486}
@@ -472,7 +510,7 @@ struct auth_ops svcauth_null = {
472 510
473 511
474static int 512static int
475svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp) 513svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
476{ 514{
477 struct kvec *argv = &rqstp->rq_arg.head[0]; 515 struct kvec *argv = &rqstp->rq_arg.head[0];
478 struct kvec *resv = &rqstp->rq_res.head[0]; 516 struct kvec *resv = &rqstp->rq_res.head[0];
@@ -488,31 +526,31 @@ svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
488 526
489 svc_getu32(argv); /* length */ 527 svc_getu32(argv); /* length */
490 svc_getu32(argv); /* time stamp */ 528 svc_getu32(argv); /* time stamp */
491 slen = XDR_QUADLEN(ntohl(svc_getu32(argv))); /* machname length */ 529 slen = XDR_QUADLEN(svc_getnl(argv)); /* machname length */
492 if (slen > 64 || (len -= (slen + 3)*4) < 0) 530 if (slen > 64 || (len -= (slen + 3)*4) < 0)
493 goto badcred; 531 goto badcred;
494 argv->iov_base = (void*)((u32*)argv->iov_base + slen); /* skip machname */ 532 argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
495 argv->iov_len -= slen*4; 533 argv->iov_len -= slen*4;
496 534
497 cred->cr_uid = ntohl(svc_getu32(argv)); /* uid */ 535 cred->cr_uid = svc_getnl(argv); /* uid */
498 cred->cr_gid = ntohl(svc_getu32(argv)); /* gid */ 536 cred->cr_gid = svc_getnl(argv); /* gid */
499 slen = ntohl(svc_getu32(argv)); /* gids length */ 537 slen = svc_getnl(argv); /* gids length */
500 if (slen > 16 || (len -= (slen + 2)*4) < 0) 538 if (slen > 16 || (len -= (slen + 2)*4) < 0)
501 goto badcred; 539 goto badcred;
502 cred->cr_group_info = groups_alloc(slen); 540 cred->cr_group_info = groups_alloc(slen);
503 if (cred->cr_group_info == NULL) 541 if (cred->cr_group_info == NULL)
504 return SVC_DROP; 542 return SVC_DROP;
505 for (i = 0; i < slen; i++) 543 for (i = 0; i < slen; i++)
506 GROUP_AT(cred->cr_group_info, i) = ntohl(svc_getu32(argv)); 544 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
507 545
508 if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) { 546 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
509 *authp = rpc_autherr_badverf; 547 *authp = rpc_autherr_badverf;
510 return SVC_DENIED; 548 return SVC_DENIED;
511 } 549 }
512 550
513 /* Put NULL verifier */ 551 /* Put NULL verifier */
514 svc_putu32(resv, RPC_AUTH_NULL); 552 svc_putnl(resv, RPC_AUTH_NULL);
515 svc_putu32(resv, 0); 553 svc_putnl(resv, 0);
516 554
517 return SVC_OK; 555 return SVC_OK;
518 556
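[Editorial aside, not part of the patch: the ip_map_cached_get()/ip_map_cached_put() pair added above remembers the last ip_map a TCP socket resolved, reusing it only while the cache entry is still valid and otherwise dropping the stale reference so the next request does a fresh lookup. A userspace model with invented types.]

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
	int refs;               /* reference count */
	bool valid;             /* false once the cache entry is superseded */
};

struct sock_cache { struct entry *cached; };

static struct entry *cached_get(struct sock_cache *sc)
{
	struct entry *e = sc->cached;

	if (e == NULL)
		return NULL;            /* nothing remembered yet */
	if (!e->valid) {                /* invalidated since it was remembered */
		sc->cached = NULL;
		e->refs--;
		return NULL;
	}
	e->refs++;                      /* hand the caller its own reference */
	return e;
}

static void cached_put(struct sock_cache *sc, struct entry *e)
{
	if (sc->cached == NULL)
		sc->cached = e;         /* newly cached: keep the reference */
	else
		e->refs--;              /* already cached: drop this one */
}

int main(void)
{
	struct entry ipm = { .refs = 1, .valid = true };
	struct sock_cache sc = { NULL };

	struct entry *e = cached_get(&sc);      /* miss */
	if (e == NULL) {
		e = &ipm;                       /* "fresh lookup" ... */
		e->refs++;                      /* ...returns with a reference */
	}
	cached_put(&sc, e);                     /* becomes the cached copy */
	printf("cached=%s refs=%d\n", sc.cached ? "yes" : "no", ipm.refs);
	return 0;
}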
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 953aff89bcac..96521f16342b 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -31,6 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/file.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <net/checksum.h> 36#include <net/checksum.h>
36#include <net/ip.h> 37#include <net/ip.h>
@@ -45,13 +46,16 @@
45 46
46/* SMP locking strategy: 47/* SMP locking strategy:
47 * 48 *
48 * svc_serv->sv_lock protects most stuff for that service. 49 * svc_pool->sp_lock protects most of the fields of that pool.
50 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
51 * when both need to be taken (rare), svc_serv->sv_lock is first.
52 * BKL protects svc_serv->sv_nrthread.
53 * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
54 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
49 * 55 *
50 * Some flags can be set to certain values at any time 56 * Some flags can be set to certain values at any time
51 * providing that certain rules are followed: 57 * providing that certain rules are followed:
52 * 58 *
53 * SK_BUSY can be set to 0 at any time.
54 * svc_sock_enqueue must be called afterwards
55 * SK_CONN, SK_DATA, can be set or cleared at any time. 59 * SK_CONN, SK_DATA, can be set or cleared at any time.
56 * after a set, svc_sock_enqueue must be called. 60 * after a set, svc_sock_enqueue must be called.
57 * after a clear, the socket must be read/accepted 61 * after a clear, the socket must be read/accepted
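[Editorial aside, not part of the patch: the locking-strategy comment above states that when both the service-wide sv_lock and a pool's sp_lock are needed, sv_lock is taken first. A tiny pthread sketch of that fixed ordering, which is the standard way to avoid ABBA deadlocks.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sv_lock = PTHREAD_MUTEX_INITIALIZER;     /* service-wide */
static pthread_mutex_t sp_lock = PTHREAD_MUTEX_INITIALIZER;     /* one pool */

static void needs_both_locks(void)
{
	pthread_mutex_lock(&sv_lock);           /* outer lock first, always */
	pthread_mutex_lock(&sp_lock);
	/* ... touch both service-wide and pool-local lists here ... */
	pthread_mutex_unlock(&sp_lock);
	pthread_mutex_unlock(&sv_lock);
}

int main(void)
{
	needs_both_locks();
	puts("consistent lock order, no ABBA deadlock possible");
	return 0;
}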
@@ -73,23 +77,30 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
73static int svc_deferred_recv(struct svc_rqst *rqstp); 77static int svc_deferred_recv(struct svc_rqst *rqstp);
74static struct cache_deferred_req *svc_defer(struct cache_req *req); 78static struct cache_deferred_req *svc_defer(struct cache_req *req);
75 79
80/* apparently the "standard" is that clients close
81 * idle connections after 5 minutes, servers after
82 * 6 minutes
83 * http://www.connectathon.org/talks96/nfstcp.pdf
84 */
85static int svc_conn_age_period = 6*60;
86
76/* 87/*
77 * Queue up an idle server thread. Must have serv->sv_lock held. 88 * Queue up an idle server thread. Must have pool->sp_lock held.
78 * Note: this is really a stack rather than a queue, so that we only 89 * Note: this is really a stack rather than a queue, so that we only
79 * use as many different threads as we need, and the rest don't polute 90 * use as many different threads as we need, and the rest don't pollute
80 * the cache. 91 * the cache.
81 */ 92 */
82static inline void 93static inline void
83svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp) 94svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
84{ 95{
85 list_add(&rqstp->rq_list, &serv->sv_threads); 96 list_add(&rqstp->rq_list, &pool->sp_threads);
86} 97}
87 98
88/* 99/*
89 * Dequeue an nfsd thread. Must have serv->sv_lock held. 100 * Dequeue an nfsd thread. Must have pool->sp_lock held.
90 */ 101 */
91static inline void 102static inline void
92svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp) 103svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
93{ 104{
94 list_del(&rqstp->rq_list); 105 list_del(&rqstp->rq_list);
95} 106}
@@ -140,7 +151,9 @@ static void
140svc_sock_enqueue(struct svc_sock *svsk) 151svc_sock_enqueue(struct svc_sock *svsk)
141{ 152{
142 struct svc_serv *serv = svsk->sk_server; 153 struct svc_serv *serv = svsk->sk_server;
154 struct svc_pool *pool;
143 struct svc_rqst *rqstp; 155 struct svc_rqst *rqstp;
156 int cpu;
144 157
145 if (!(svsk->sk_flags & 158 if (!(svsk->sk_flags &
146 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) )) 159 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
@@ -148,10 +161,14 @@ svc_sock_enqueue(struct svc_sock *svsk)
148 if (test_bit(SK_DEAD, &svsk->sk_flags)) 161 if (test_bit(SK_DEAD, &svsk->sk_flags))
149 return; 162 return;
150 163
151 spin_lock_bh(&serv->sv_lock); 164 cpu = get_cpu();
165 pool = svc_pool_for_cpu(svsk->sk_server, cpu);
166 put_cpu();
167
168 spin_lock_bh(&pool->sp_lock);
152 169
153 if (!list_empty(&serv->sv_threads) && 170 if (!list_empty(&pool->sp_threads) &&
154 !list_empty(&serv->sv_sockets)) 171 !list_empty(&pool->sp_sockets))
155 printk(KERN_ERR 172 printk(KERN_ERR
156 "svc_sock_enqueue: threads and sockets both waiting??\n"); 173 "svc_sock_enqueue: threads and sockets both waiting??\n");
157 174
@@ -161,73 +178,79 @@ svc_sock_enqueue(struct svc_sock *svsk)
161 goto out_unlock; 178 goto out_unlock;
162 } 179 }
163 180
164 if (test_bit(SK_BUSY, &svsk->sk_flags)) { 181 /* Mark socket as busy. It will remain in this state until the
165 /* Don't enqueue socket while daemon is receiving */ 182 * server has processed all pending data and put the socket back
183 * on the idle list. We update SK_BUSY atomically because
184 * it also guards against trying to enqueue the svc_sock twice.
185 */
186 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
187 /* Don't enqueue socket while already enqueued */
166 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk); 188 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
167 goto out_unlock; 189 goto out_unlock;
168 } 190 }
191 BUG_ON(svsk->sk_pool != NULL);
192 svsk->sk_pool = pool;
169 193
170 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 194 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
171 if (((svsk->sk_reserved + serv->sv_bufsz)*2 195 if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
172 > svc_sock_wspace(svsk)) 196 > svc_sock_wspace(svsk))
173 && !test_bit(SK_CLOSE, &svsk->sk_flags) 197 && !test_bit(SK_CLOSE, &svsk->sk_flags)
174 && !test_bit(SK_CONN, &svsk->sk_flags)) { 198 && !test_bit(SK_CONN, &svsk->sk_flags)) {
175 /* Don't enqueue while not enough space for reply */ 199 /* Don't enqueue while not enough space for reply */
176 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", 200 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
177 svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz, 201 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
178 svc_sock_wspace(svsk)); 202 svc_sock_wspace(svsk));
203 svsk->sk_pool = NULL;
204 clear_bit(SK_BUSY, &svsk->sk_flags);
179 goto out_unlock; 205 goto out_unlock;
180 } 206 }
181 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 207 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
182 208
183 /* Mark socket as busy. It will remain in this state until the
184 * server has processed all pending data and put the socket back
185 * on the idle list.
186 */
187 set_bit(SK_BUSY, &svsk->sk_flags);
188 209
189 if (!list_empty(&serv->sv_threads)) { 210 if (!list_empty(&pool->sp_threads)) {
190 rqstp = list_entry(serv->sv_threads.next, 211 rqstp = list_entry(pool->sp_threads.next,
191 struct svc_rqst, 212 struct svc_rqst,
192 rq_list); 213 rq_list);
193 dprintk("svc: socket %p served by daemon %p\n", 214 dprintk("svc: socket %p served by daemon %p\n",
194 svsk->sk_sk, rqstp); 215 svsk->sk_sk, rqstp);
195 svc_serv_dequeue(serv, rqstp); 216 svc_thread_dequeue(pool, rqstp);
196 if (rqstp->rq_sock) 217 if (rqstp->rq_sock)
197 printk(KERN_ERR 218 printk(KERN_ERR
198 "svc_sock_enqueue: server %p, rq_sock=%p!\n", 219 "svc_sock_enqueue: server %p, rq_sock=%p!\n",
199 rqstp, rqstp->rq_sock); 220 rqstp, rqstp->rq_sock);
200 rqstp->rq_sock = svsk; 221 rqstp->rq_sock = svsk;
201 svsk->sk_inuse++; 222 atomic_inc(&svsk->sk_inuse);
202 rqstp->rq_reserved = serv->sv_bufsz; 223 rqstp->rq_reserved = serv->sv_max_mesg;
203 svsk->sk_reserved += rqstp->rq_reserved; 224 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
225 BUG_ON(svsk->sk_pool != pool);
204 wake_up(&rqstp->rq_wait); 226 wake_up(&rqstp->rq_wait);
205 } else { 227 } else {
206 dprintk("svc: socket %p put into queue\n", svsk->sk_sk); 228 dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
207 list_add_tail(&svsk->sk_ready, &serv->sv_sockets); 229 list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
230 BUG_ON(svsk->sk_pool != pool);
208 } 231 }
209 232
210out_unlock: 233out_unlock:
211 spin_unlock_bh(&serv->sv_lock); 234 spin_unlock_bh(&pool->sp_lock);
212} 235}
213 236
214/* 237/*
215 * Dequeue the first socket. Must be called with the serv->sv_lock held. 238 * Dequeue the first socket. Must be called with the pool->sp_lock held.
216 */ 239 */
217static inline struct svc_sock * 240static inline struct svc_sock *
218svc_sock_dequeue(struct svc_serv *serv) 241svc_sock_dequeue(struct svc_pool *pool)
219{ 242{
220 struct svc_sock *svsk; 243 struct svc_sock *svsk;
221 244
222 if (list_empty(&serv->sv_sockets)) 245 if (list_empty(&pool->sp_sockets))
223 return NULL; 246 return NULL;
224 247
225 svsk = list_entry(serv->sv_sockets.next, 248 svsk = list_entry(pool->sp_sockets.next,
226 struct svc_sock, sk_ready); 249 struct svc_sock, sk_ready);
227 list_del_init(&svsk->sk_ready); 250 list_del_init(&svsk->sk_ready);
228 251
229 dprintk("svc: socket %p dequeued, inuse=%d\n", 252 dprintk("svc: socket %p dequeued, inuse=%d\n",
230 svsk->sk_sk, svsk->sk_inuse); 253 svsk->sk_sk, atomic_read(&svsk->sk_inuse));
231 254
232 return svsk; 255 return svsk;
233} 256}
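[Editorial aside, not part of the patch: svc_sock_enqueue() now picks the pool associated with the current CPU via svc_pool_for_cpu() and uses an atomic test-and-set on SK_BUSY so a socket can never be queued to two pools at once. A toy model of the CPU-to-pool mapping; the modulo mapping is an assumption for illustration.]

#include <stdio.h>

#define NR_POOLS 4

struct pool { unsigned int id; };

static struct pool pools[NR_POOLS];

static struct pool *pool_for_cpu(unsigned int cpu)
{
	/* the real mapping is per-CPU or per-NUMA-node; modulo is enough here */
	return &pools[cpu % NR_POOLS];
}

int main(void)
{
	for (unsigned int i = 0; i < NR_POOLS; i++)
		pools[i].id = i;
	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> pool %u\n", cpu, pool_for_cpu(cpu)->id);
	return 0;
}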
@@ -241,6 +264,7 @@ svc_sock_dequeue(struct svc_serv *serv)
241static inline void 264static inline void
242svc_sock_received(struct svc_sock *svsk) 265svc_sock_received(struct svc_sock *svsk)
243{ 266{
267 svsk->sk_pool = NULL;
244 clear_bit(SK_BUSY, &svsk->sk_flags); 268 clear_bit(SK_BUSY, &svsk->sk_flags);
245 svc_sock_enqueue(svsk); 269 svc_sock_enqueue(svsk);
246} 270}
@@ -262,10 +286,8 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
262 286
263 if (space < rqstp->rq_reserved) { 287 if (space < rqstp->rq_reserved) {
264 struct svc_sock *svsk = rqstp->rq_sock; 288 struct svc_sock *svsk = rqstp->rq_sock;
265 spin_lock_bh(&svsk->sk_server->sv_lock); 289 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
266 svsk->sk_reserved -= (rqstp->rq_reserved - space);
267 rqstp->rq_reserved = space; 290 rqstp->rq_reserved = space;
268 spin_unlock_bh(&svsk->sk_server->sv_lock);
269 291
270 svc_sock_enqueue(svsk); 292 svc_sock_enqueue(svsk);
271 } 293 }
@@ -277,17 +299,11 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
277static inline void 299static inline void
278svc_sock_put(struct svc_sock *svsk) 300svc_sock_put(struct svc_sock *svsk)
279{ 301{
280 struct svc_serv *serv = svsk->sk_server; 302 if (atomic_dec_and_test(&svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
281
282 spin_lock_bh(&serv->sv_lock);
283 if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
284 spin_unlock_bh(&serv->sv_lock);
285 dprintk("svc: releasing dead socket\n"); 303 dprintk("svc: releasing dead socket\n");
286 sock_release(svsk->sk_sock); 304 sock_release(svsk->sk_sock);
287 kfree(svsk); 305 kfree(svsk);
288 } 306 }
289 else
290 spin_unlock_bh(&serv->sv_lock);
291} 307}
292 308
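[Editorial aside, not part of the patch: with sk_inuse now an atomic counter, svc_sock_put() above can detect "count hit zero" and free a dead socket without taking the service lock. In C11 terms, the pattern looks roughly like this.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int inuse;       /* ~ svsk->sk_inuse */
	bool dead;              /* ~ SK_DEAD: unlinked from all lists */
};

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value: old == 1 means we dropped the last ref */
	if (atomic_fetch_sub(&o->inuse, 1) == 1 && o->dead) {
		printf("releasing dead object\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->inuse, 2);
	o->dead = true;
	obj_put(o);             /* 2 -> 1: still referenced elsewhere */
	obj_put(o);             /* 1 -> 0: freed */
	return 0;
}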
293static void 309static void
@@ -297,7 +313,7 @@ svc_sock_release(struct svc_rqst *rqstp)
297 313
298 svc_release_skb(rqstp); 314 svc_release_skb(rqstp);
299 315
300 svc_free_allpages(rqstp); 316 svc_free_res_pages(rqstp);
301 rqstp->rq_res.page_len = 0; 317 rqstp->rq_res.page_len = 0;
302 rqstp->rq_res.page_base = 0; 318 rqstp->rq_res.page_base = 0;
303 319
@@ -321,25 +337,33 @@ svc_sock_release(struct svc_rqst *rqstp)
321 337
322/* 338/*
323 * External function to wake up a server waiting for data 339 * External function to wake up a server waiting for data
340 * This really only makes sense for services like lockd
341 * which have exactly one thread anyway.
324 */ 342 */
325void 343void
326svc_wake_up(struct svc_serv *serv) 344svc_wake_up(struct svc_serv *serv)
327{ 345{
328 struct svc_rqst *rqstp; 346 struct svc_rqst *rqstp;
329 347 unsigned int i;
330 spin_lock_bh(&serv->sv_lock); 348 struct svc_pool *pool;
331 if (!list_empty(&serv->sv_threads)) { 349
332 rqstp = list_entry(serv->sv_threads.next, 350 for (i = 0; i < serv->sv_nrpools; i++) {
333 struct svc_rqst, 351 pool = &serv->sv_pools[i];
334 rq_list); 352
335 dprintk("svc: daemon %p woken up.\n", rqstp); 353 spin_lock_bh(&pool->sp_lock);
336 /* 354 if (!list_empty(&pool->sp_threads)) {
337 svc_serv_dequeue(serv, rqstp); 355 rqstp = list_entry(pool->sp_threads.next,
338 rqstp->rq_sock = NULL; 356 struct svc_rqst,
339 */ 357 rq_list);
340 wake_up(&rqstp->rq_wait); 358 dprintk("svc: daemon %p woken up.\n", rqstp);
359 /*
360 svc_thread_dequeue(pool, rqstp);
361 rqstp->rq_sock = NULL;
362 */
363 wake_up(&rqstp->rq_wait);
364 }
365 spin_unlock_bh(&pool->sp_lock);
341 } 366 }
342 spin_unlock_bh(&serv->sv_lock);
343} 367}
344 368
345/* 369/*
@@ -388,7 +412,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
388 /* send head */ 412 /* send head */
389 if (slen == xdr->head[0].iov_len) 413 if (slen == xdr->head[0].iov_len)
390 flags = 0; 414 flags = 0;
391 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags); 415 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
416 xdr->head[0].iov_len, flags);
392 if (len != xdr->head[0].iov_len) 417 if (len != xdr->head[0].iov_len)
393 goto out; 418 goto out;
394 slen -= xdr->head[0].iov_len; 419 slen -= xdr->head[0].iov_len;
@@ -413,8 +438,9 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
413 } 438 }
414 /* send tail */ 439 /* send tail */
415 if (xdr->tail[0].iov_len) { 440 if (xdr->tail[0].iov_len) {
416 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage], 441 result = kernel_sendpage(sock, rqstp->rq_respages[0],
417 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1), 442 ((unsigned long)xdr->tail[0].iov_base)
443 & (PAGE_SIZE-1),
418 xdr->tail[0].iov_len, 0); 444 xdr->tail[0].iov_len, 0);
419 445
420 if (result > 0) 446 if (result > 0)
@@ -429,6 +455,56 @@ out:
429} 455}
430 456
431/* 457/*
458 * Report socket names for nfsdfs
459 */
460static int one_sock_name(char *buf, struct svc_sock *svsk)
461{
462 int len;
463
464 switch(svsk->sk_sk->sk_family) {
465 case AF_INET:
466 len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
467 svsk->sk_sk->sk_protocol==IPPROTO_UDP?
468 "udp" : "tcp",
469 NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
470 inet_sk(svsk->sk_sk)->num);
471 break;
472 default:
473 len = sprintf(buf, "*unknown-%d*\n",
474 svsk->sk_sk->sk_family);
475 }
476 return len;
477}
478
479int
480svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
481{
482 struct svc_sock *svsk, *closesk = NULL;
483 int len = 0;
484
485 if (!serv)
486 return 0;
487 spin_lock(&serv->sv_lock);
488 list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
489 int onelen = one_sock_name(buf+len, svsk);
490 if (toclose && strcmp(toclose, buf+len) == 0)
491 closesk = svsk;
492 else
493 len += onelen;
494 }
495 spin_unlock(&serv->sv_lock);
496 if (closesk)
497 /* Should unregister with portmap, but you cannot
498 * unregister just one protocol...
499 */
500 svc_delete_socket(closesk);
501 else if (toclose)
502 return -ENOENT;
503 return len;
504}
505EXPORT_SYMBOL(svc_sock_names);
506
507/*
432 * Check input queue length 508 * Check input queue length
433 */ 509 */
434static int 510static int
@@ -557,11 +633,14 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
557 /* udp sockets need large rcvbuf as all pending 633 /* udp sockets need large rcvbuf as all pending
558 * requests are still in that buffer. sndbuf must 634 * requests are still in that buffer. sndbuf must
559 * also be large enough that there is enough space 635 * also be large enough that there is enough space
560 * for one reply per thread. 636 * for one reply per thread. We count all threads
637 * rather than threads in a particular pool, which
638 * provides an upper bound on the number of threads
639 * which will access the socket.
561 */ 640 */
562 svc_sock_setbufsize(svsk->sk_sock, 641 svc_sock_setbufsize(svsk->sk_sock,
563 (serv->sv_nrthreads+3) * serv->sv_bufsz, 642 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
564 (serv->sv_nrthreads+3) * serv->sv_bufsz); 643 (serv->sv_nrthreads+3) * serv->sv_max_mesg);
565 644
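[Editorial aside, not part of the patch: the comment above is about sizing the UDP socket buffers so every thread can have one maximum-sized message in flight, plus a little slack. A plain userspace analogue of svc_sock_setbufsize() using SO_SNDBUF/SO_RCVBUF; the thread count and message size are invented.]

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int nrthreads = 8, max_mesg = 64 * 1024;
	int want = (nrthreads + 3) * max_mesg;  /* same formula as the patch */
	int got, fd;
	socklen_t len = sizeof(got);

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &want, sizeof(want));
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &want, sizeof(want));

	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
	printf("asked for %d bytes of rcvbuf, kernel granted %d\n", want, got);

	close(fd);
	return 0;
}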
566 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { 645 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
567 svc_sock_received(svsk); 646 svc_sock_received(svsk);
@@ -631,9 +710,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
631 if (len <= rqstp->rq_arg.head[0].iov_len) { 710 if (len <= rqstp->rq_arg.head[0].iov_len) {
632 rqstp->rq_arg.head[0].iov_len = len; 711 rqstp->rq_arg.head[0].iov_len = len;
633 rqstp->rq_arg.page_len = 0; 712 rqstp->rq_arg.page_len = 0;
713 rqstp->rq_respages = rqstp->rq_pages+1;
634 } else { 714 } else {
635 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 715 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
636 rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; 716 rqstp->rq_respages = rqstp->rq_pages + 1 +
717 (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
637 } 718 }
638 719
639 if (serv->sv_stats) 720 if (serv->sv_stats)
@@ -668,8 +749,8 @@ svc_udp_init(struct svc_sock *svsk)
668 * svc_udp_recvfrom will re-adjust if necessary 749 * svc_udp_recvfrom will re-adjust if necessary
669 */ 750 */
670 svc_sock_setbufsize(svsk->sk_sock, 751 svc_sock_setbufsize(svsk->sk_sock,
671 3 * svsk->sk_server->sv_bufsz, 752 3 * svsk->sk_server->sv_max_mesg,
672 3 * svsk->sk_server->sv_bufsz); 753 3 * svsk->sk_server->sv_max_mesg);
673 754
674 set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ 755 set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
675 set_bit(SK_CHNGBUF, &svsk->sk_flags); 756 set_bit(SK_CHNGBUF, &svsk->sk_flags);
@@ -844,7 +925,7 @@ svc_tcp_accept(struct svc_sock *svsk)
844 struct svc_sock, 925 struct svc_sock,
845 sk_list); 926 sk_list);
846 set_bit(SK_CLOSE, &svsk->sk_flags); 927 set_bit(SK_CLOSE, &svsk->sk_flags);
847 svsk->sk_inuse ++; 928 atomic_inc(&svsk->sk_inuse);
848 } 929 }
849 spin_unlock_bh(&serv->sv_lock); 930 spin_unlock_bh(&serv->sv_lock);
850 931
@@ -874,7 +955,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
874 struct svc_sock *svsk = rqstp->rq_sock; 955 struct svc_sock *svsk = rqstp->rq_sock;
875 struct svc_serv *serv = svsk->sk_server; 956 struct svc_serv *serv = svsk->sk_server;
876 int len; 957 int len;
877 struct kvec vec[RPCSVC_MAXPAGES]; 958 struct kvec *vec;
878 int pnum, vlen; 959 int pnum, vlen;
879 960
880 dprintk("svc: tcp_recv %p data %d conn %d close %d\n", 961 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
@@ -892,7 +973,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
892 return 0; 973 return 0;
893 } 974 }
894 975
895 if (test_bit(SK_CONN, &svsk->sk_flags)) { 976 if (svsk->sk_sk->sk_state == TCP_LISTEN) {
896 svc_tcp_accept(svsk); 977 svc_tcp_accept(svsk);
897 svc_sock_received(svsk); 978 svc_sock_received(svsk);
898 return 0; 979 return 0;
@@ -902,13 +983,18 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
902 /* sndbuf needs to have room for one request 983 /* sndbuf needs to have room for one request
903 * per thread, otherwise we can stall even when the 984 * per thread, otherwise we can stall even when the
904 * network isn't a bottleneck. 985 * network isn't a bottleneck.
986 *
987 * We count all threads rather than threads in a
988 * particular pool, which provides an upper bound
989 * on the number of threads which will access the socket.
990 *
905 * rcvbuf just needs to be able to hold a few requests. 991 * rcvbuf just needs to be able to hold a few requests.
906 * Normally they will be removed from the queue 992 * Normally they will be removed from the queue
907 * as soon as a complete request arrives. 993
908 */ 994 */
909 svc_sock_setbufsize(svsk->sk_sock, 995 svc_sock_setbufsize(svsk->sk_sock,
910 (serv->sv_nrthreads+3) * serv->sv_bufsz, 996 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
911 3 * serv->sv_bufsz); 997 3 * serv->sv_max_mesg);
912 998
913 clear_bit(SK_DATA, &svsk->sk_flags); 999 clear_bit(SK_DATA, &svsk->sk_flags);
914 1000
@@ -946,7 +1032,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
946 } 1032 }
947 svsk->sk_reclen &= 0x7fffffff; 1033 svsk->sk_reclen &= 0x7fffffff;
948 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); 1034 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
949 if (svsk->sk_reclen > serv->sv_bufsz) { 1035 if (svsk->sk_reclen > serv->sv_max_mesg) {
950 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n", 1036 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
951 (unsigned long) svsk->sk_reclen); 1037 (unsigned long) svsk->sk_reclen);
952 goto err_delete; 1038 goto err_delete;
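[Editorial aside, not part of the patch: the sk_reclen handling above parses the RPC-over-TCP record marker, a 4-byte big-endian word whose top bit means "last fragment" and whose low 31 bits give the fragment length, hence the mask with 0x7fffffff. A standalone illustration with a made-up length.]

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t on_wire = htonl(0x80000000u | 472);    /* last fragment, 472 bytes */
	uint32_t marker = ntohl(on_wire);

	int last_frag = (marker & 0x80000000u) != 0;
	uint32_t reclen = marker & 0x7fffffffu;

	printf("last=%d reclen=%u\n", last_frag, reclen);
	return 0;
}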
@@ -967,15 +1053,17 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
967 len = svsk->sk_reclen; 1053 len = svsk->sk_reclen;
968 set_bit(SK_DATA, &svsk->sk_flags); 1054 set_bit(SK_DATA, &svsk->sk_flags);
969 1055
1056 vec = rqstp->rq_vec;
970 vec[0] = rqstp->rq_arg.head[0]; 1057 vec[0] = rqstp->rq_arg.head[0];
971 vlen = PAGE_SIZE; 1058 vlen = PAGE_SIZE;
972 pnum = 1; 1059 pnum = 1;
973 while (vlen < len) { 1060 while (vlen < len) {
974 vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]); 1061 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
975 vec[pnum].iov_len = PAGE_SIZE; 1062 vec[pnum].iov_len = PAGE_SIZE;
976 pnum++; 1063 pnum++;
977 vlen += PAGE_SIZE; 1064 vlen += PAGE_SIZE;
978 } 1065 }
1066 rqstp->rq_respages = &rqstp->rq_pages[pnum];
979 1067
980 /* Now receive data */ 1068 /* Now receive data */
981 len = svc_recvfrom(rqstp, vec, pnum, len); 1069 len = svc_recvfrom(rqstp, vec, pnum, len);
@@ -1030,7 +1118,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
1030{ 1118{
1031 struct xdr_buf *xbufp = &rqstp->rq_res; 1119 struct xdr_buf *xbufp = &rqstp->rq_res;
1032 int sent; 1120 int sent;
1033 u32 reclen; 1121 __be32 reclen;
1034 1122
1035 /* Set up the first element of the reply kvec. 1123 /* Set up the first element of the reply kvec.
1036 * Any other kvecs that may be in use have been taken 1124 * Any other kvecs that may be in use have been taken
@@ -1083,8 +1171,8 @@ svc_tcp_init(struct svc_sock *svsk)
1083 * svc_tcp_recvfrom will re-adjust if necessary 1171 * svc_tcp_recvfrom will re-adjust if necessary
1084 */ 1172 */
1085 svc_sock_setbufsize(svsk->sk_sock, 1173 svc_sock_setbufsize(svsk->sk_sock,
1086 3 * svsk->sk_server->sv_bufsz, 1174 3 * svsk->sk_server->sv_max_mesg,
1087 3 * svsk->sk_server->sv_bufsz); 1175 3 * svsk->sk_server->sv_max_mesg);
1088 1176
1089 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1177 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1090 set_bit(SK_DATA, &svsk->sk_flags); 1178 set_bit(SK_DATA, &svsk->sk_flags);
@@ -1117,13 +1205,17 @@ svc_sock_update_bufs(struct svc_serv *serv)
1117} 1205}
1118 1206
1119/* 1207/*
1120 * Receive the next request on any socket. 1208 * Receive the next request on any socket. This code is carefully
1209 * organised not to touch any cachelines in the shared svc_serv
1210 * structure, only cachelines in the local svc_pool.
1121 */ 1211 */
1122int 1212int
1123svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout) 1213svc_recv(struct svc_rqst *rqstp, long timeout)
1124{ 1214{
1125 struct svc_sock *svsk =NULL; 1215 struct svc_sock *svsk =NULL;
1126 int len; 1216 struct svc_serv *serv = rqstp->rq_server;
1217 struct svc_pool *pool = rqstp->rq_pool;
1218 int len, i;
1127 int pages; 1219 int pages;
1128 struct xdr_buf *arg; 1220 struct xdr_buf *arg;
1129 DECLARE_WAITQUEUE(wait, current); 1221 DECLARE_WAITQUEUE(wait, current);
@@ -1140,27 +1232,22 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
1140 "svc_recv: service %p, wait queue active!\n", 1232 "svc_recv: service %p, wait queue active!\n",
1141 rqstp); 1233 rqstp);
1142 1234
1143 /* Initialize the buffers */
1144 /* first reclaim pages that were moved to response list */
1145 svc_pushback_allpages(rqstp);
1146 1235
1147 /* now allocate needed pages. If we get a failure, sleep briefly */ 1236 /* now allocate needed pages. If we get a failure, sleep briefly */
1148 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; 1237 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
1149 while (rqstp->rq_arghi < pages) { 1238 for (i=0; i < pages ; i++)
1150 struct page *p = alloc_page(GFP_KERNEL); 1239 while (rqstp->rq_pages[i] == NULL) {
1151 if (!p) { 1240 struct page *p = alloc_page(GFP_KERNEL);
1152 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1241 if (!p)
1153 continue; 1242 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1243 rqstp->rq_pages[i] = p;
1154 } 1244 }
1155 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1156 }
1157 1245
1158 /* Make arg->head point to first page and arg->pages point to rest */ 1246 /* Make arg->head point to first page and arg->pages point to rest */
1159 arg = &rqstp->rq_arg; 1247 arg = &rqstp->rq_arg;
1160 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); 1248 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1161 arg->head[0].iov_len = PAGE_SIZE; 1249 arg->head[0].iov_len = PAGE_SIZE;
1162 rqstp->rq_argused = 1; 1250 arg->pages = rqstp->rq_pages + 1;
1163 arg->pages = rqstp->rq_argpages + 1;
1164 arg->page_base = 0; 1251 arg->page_base = 0;
1165 /* save at least one page for response */ 1252 /* save at least one page for response */
1166 arg->page_len = (pages-2)*PAGE_SIZE; 1253 arg->page_len = (pages-2)*PAGE_SIZE;
@@ -1172,32 +1259,15 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
1172 if (signalled()) 1259 if (signalled())
1173 return -EINTR; 1260 return -EINTR;
1174 1261
1175 spin_lock_bh(&serv->sv_lock); 1262 spin_lock_bh(&pool->sp_lock);
1176 if (!list_empty(&serv->sv_tempsocks)) { 1263 if ((svsk = svc_sock_dequeue(pool)) != NULL) {
1177 svsk = list_entry(serv->sv_tempsocks.next,
1178 struct svc_sock, sk_list);
1179 /* apparently the "standard" is that clients close
1180 * idle connections after 5 minutes, servers after
1181 * 6 minutes
1182 * http://www.connectathon.org/talks96/nfstcp.pdf
1183 */
1184 if (get_seconds() - svsk->sk_lastrecv < 6*60
1185 || test_bit(SK_BUSY, &svsk->sk_flags))
1186 svsk = NULL;
1187 }
1188 if (svsk) {
1189 set_bit(SK_BUSY, &svsk->sk_flags);
1190 set_bit(SK_CLOSE, &svsk->sk_flags);
1191 rqstp->rq_sock = svsk;
1192 svsk->sk_inuse++;
1193 } else if ((svsk = svc_sock_dequeue(serv)) != NULL) {
1194 rqstp->rq_sock = svsk; 1264 rqstp->rq_sock = svsk;
1195 svsk->sk_inuse++; 1265 atomic_inc(&svsk->sk_inuse);
1196 rqstp->rq_reserved = serv->sv_bufsz; 1266 rqstp->rq_reserved = serv->sv_max_mesg;
1197 svsk->sk_reserved += rqstp->rq_reserved; 1267 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
1198 } else { 1268 } else {
1199 /* No data pending. Go to sleep */ 1269 /* No data pending. Go to sleep */
1200 svc_serv_enqueue(serv, rqstp); 1270 svc_thread_enqueue(pool, rqstp);
1201 1271
1202 /* 1272 /*
1203 * We have to be able to interrupt this wait 1273 * We have to be able to interrupt this wait
@@ -1205,26 +1275,26 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
1205 */ 1275 */
1206 set_current_state(TASK_INTERRUPTIBLE); 1276 set_current_state(TASK_INTERRUPTIBLE);
1207 add_wait_queue(&rqstp->rq_wait, &wait); 1277 add_wait_queue(&rqstp->rq_wait, &wait);
1208 spin_unlock_bh(&serv->sv_lock); 1278 spin_unlock_bh(&pool->sp_lock);
1209 1279
1210 schedule_timeout(timeout); 1280 schedule_timeout(timeout);
1211 1281
1212 try_to_freeze(); 1282 try_to_freeze();
1213 1283
1214 spin_lock_bh(&serv->sv_lock); 1284 spin_lock_bh(&pool->sp_lock);
1215 remove_wait_queue(&rqstp->rq_wait, &wait); 1285 remove_wait_queue(&rqstp->rq_wait, &wait);
1216 1286
1217 if (!(svsk = rqstp->rq_sock)) { 1287 if (!(svsk = rqstp->rq_sock)) {
1218 svc_serv_dequeue(serv, rqstp); 1288 svc_thread_dequeue(pool, rqstp);
1219 spin_unlock_bh(&serv->sv_lock); 1289 spin_unlock_bh(&pool->sp_lock);
1220 dprintk("svc: server %p, no data yet\n", rqstp); 1290 dprintk("svc: server %p, no data yet\n", rqstp);
1221 return signalled()? -EINTR : -EAGAIN; 1291 return signalled()? -EINTR : -EAGAIN;
1222 } 1292 }
1223 } 1293 }
1224 spin_unlock_bh(&serv->sv_lock); 1294 spin_unlock_bh(&pool->sp_lock);
1225 1295
1226 dprintk("svc: server %p, socket %p, inuse=%d\n", 1296 dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
1227 rqstp, svsk, svsk->sk_inuse); 1297 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
1228 len = svsk->sk_recvfrom(rqstp); 1298 len = svsk->sk_recvfrom(rqstp);
1229 dprintk("svc: got len=%d\n", len); 1299 dprintk("svc: got len=%d\n", len);
1230 1300
@@ -1235,13 +1305,7 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
1235 return -EAGAIN; 1305 return -EAGAIN;
1236 } 1306 }
1237 svsk->sk_lastrecv = get_seconds(); 1307 svsk->sk_lastrecv = get_seconds();
1238 if (test_bit(SK_TEMP, &svsk->sk_flags)) { 1308 clear_bit(SK_OLD, &svsk->sk_flags);
1239 /* push active sockets to end of list */
1240 spin_lock_bh(&serv->sv_lock);
1241 if (!list_empty(&svsk->sk_list))
1242 list_move_tail(&svsk->sk_list, &serv->sv_tempsocks);
1243 spin_unlock_bh(&serv->sv_lock);
1244 }
1245 1309
1246 rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024; 1310 rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
1247 rqstp->rq_chandle.defer = svc_defer; 1311 rqstp->rq_chandle.defer = svc_defer;
@@ -1301,6 +1365,58 @@ svc_send(struct svc_rqst *rqstp)
1301} 1365}
1302 1366
1303/* 1367/*
1368 * Timer function to close old temporary sockets, using
1369 * a mark-and-sweep algorithm.
1370 */
1371static void
1372svc_age_temp_sockets(unsigned long closure)
1373{
1374 struct svc_serv *serv = (struct svc_serv *)closure;
1375 struct svc_sock *svsk;
1376 struct list_head *le, *next;
1377 LIST_HEAD(to_be_aged);
1378
1379 dprintk("svc_age_temp_sockets\n");
1380
1381 if (!spin_trylock_bh(&serv->sv_lock)) {
1382 /* busy, try again 1 sec later */
1383 dprintk("svc_age_temp_sockets: busy\n");
1384 mod_timer(&serv->sv_temptimer, jiffies + HZ);
1385 return;
1386 }
1387
1388 list_for_each_safe(le, next, &serv->sv_tempsocks) {
1389 svsk = list_entry(le, struct svc_sock, sk_list);
1390
1391 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
1392 continue;
1393 if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
1394 continue;
1395 atomic_inc(&svsk->sk_inuse);
1396 list_move(le, &to_be_aged);
1397 set_bit(SK_CLOSE, &svsk->sk_flags);
1398 set_bit(SK_DETACHED, &svsk->sk_flags);
1399 }
1400 spin_unlock_bh(&serv->sv_lock);
1401
1402 while (!list_empty(&to_be_aged)) {
1403 le = to_be_aged.next;
1404 /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
1405 list_del_init(le);
1406 svsk = list_entry(le, struct svc_sock, sk_list);
1407
1408 dprintk("queuing svsk %p for closing, %lu seconds old\n",
1409 svsk, get_seconds() - svsk->sk_lastrecv);
1410
1411 /* a thread will dequeue and close it soon */
1412 svc_sock_enqueue(svsk);
1413 svc_sock_put(svsk);
1414 }
1415
1416 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
1417}
1418
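[Editorial aside, not part of the patch: svc_age_temp_sockets() above is a mark-and-sweep reaper. Each timer pass marks every temporary socket "old"; receiving a request clears the mark (the clear_bit(SK_OLD, ...) added to svc_recv); a socket still marked on the next pass has been idle for a full period and is closed. A userspace model with invented names.]

#include <stdio.h>
#include <stdbool.h>

struct conn {
	const char *name;
	bool old;               /* ~ SK_OLD */
	bool closing;           /* ~ SK_CLOSE */
};

static void sweep(struct conn *c, int n)        /* runs from the periodic timer */
{
	for (int i = 0; i < n; i++) {
		if (!c[i].old) {
			c[i].old = true;        /* mark: candidate for next sweep */
			continue;
		}
		c[i].closing = true;            /* idle for a full period: close it */
	}
}

static void request_received(struct conn *c)
{
	c->old = false;                         /* ~ clear_bit(SK_OLD) in svc_recv */
}

int main(void)
{
	struct conn conns[2] = {
		{ "busy-client", false, false },
		{ "idle-client", false, false },
	};

	sweep(conns, 2);                /* both get marked old */
	request_received(&conns[0]);    /* busy client sends something */
	sweep(conns, 2);                /* idle client is still old -> close */

	for (int i = 0; i < 2; i++)
		printf("%s: closing=%d\n", conns[i].name, conns[i].closing);
	return 0;
}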
1419/*
1304 * Initialize socket for RPC use and create svc_sock struct 1420 * Initialize socket for RPC use and create svc_sock struct
1305 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. 1421 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1306 */ 1422 */
@@ -1337,7 +1453,9 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1337 svsk->sk_odata = inet->sk_data_ready; 1453 svsk->sk_odata = inet->sk_data_ready;
1338 svsk->sk_owspace = inet->sk_write_space; 1454 svsk->sk_owspace = inet->sk_write_space;
1339 svsk->sk_server = serv; 1455 svsk->sk_server = serv;
1456 atomic_set(&svsk->sk_inuse, 0);
1340 svsk->sk_lastrecv = get_seconds(); 1457 svsk->sk_lastrecv = get_seconds();
1458 spin_lock_init(&svsk->sk_defer_lock);
1341 INIT_LIST_HEAD(&svsk->sk_deferred); 1459 INIT_LIST_HEAD(&svsk->sk_deferred);
1342 INIT_LIST_HEAD(&svsk->sk_ready); 1460 INIT_LIST_HEAD(&svsk->sk_ready);
1343 mutex_init(&svsk->sk_mutex); 1461 mutex_init(&svsk->sk_mutex);
@@ -1353,6 +1471,13 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1353 set_bit(SK_TEMP, &svsk->sk_flags); 1471 set_bit(SK_TEMP, &svsk->sk_flags);
1354 list_add(&svsk->sk_list, &serv->sv_tempsocks); 1472 list_add(&svsk->sk_list, &serv->sv_tempsocks);
1355 serv->sv_tmpcnt++; 1473 serv->sv_tmpcnt++;
1474 if (serv->sv_temptimer.function == NULL) {
1475 /* setup timer to age temp sockets */
1476 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
1477 (unsigned long)serv);
1478 mod_timer(&serv->sv_temptimer,
1479 jiffies + svc_conn_age_period * HZ);
1480 }
1356 } else { 1481 } else {
1357 clear_bit(SK_TEMP, &svsk->sk_flags); 1482 clear_bit(SK_TEMP, &svsk->sk_flags);
1358 list_add(&svsk->sk_list, &serv->sv_permsocks); 1483 list_add(&svsk->sk_list, &serv->sv_permsocks);
@@ -1367,6 +1492,38 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1367 return svsk; 1492 return svsk;
1368} 1493}
1369 1494
1495int svc_addsock(struct svc_serv *serv,
1496 int fd,
1497 char *name_return,
1498 int *proto)
1499{
1500 int err = 0;
1501 struct socket *so = sockfd_lookup(fd, &err);
1502 struct svc_sock *svsk = NULL;
1503
1504 if (!so)
1505 return err;
1506 if (so->sk->sk_family != AF_INET)
1507 err = -EAFNOSUPPORT;
1508 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1509 so->sk->sk_protocol != IPPROTO_UDP)
1510 err = -EPROTONOSUPPORT;
1511 else if (so->state > SS_UNCONNECTED)
1512 err = -EISCONN;
1513 else {
1514 svsk = svc_setup_socket(serv, so, &err, 1);
1515 if (svsk)
1516 err = 0;
1517 }
1518 if (err) {
1519 sockfd_put(so);
1520 return err;
1521 }
1522 if (proto) *proto = so->sk->sk_protocol;
1523 return one_sock_name(name_return, svsk);
1524}
1525EXPORT_SYMBOL_GPL(svc_addsock);
1526
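[Editorial aside, not part of the patch: svc_addsock() above sanity-checks a caller-supplied descriptor; it must be an AF_INET TCP or UDP socket that is not already connected. The userspace probe below is only meant to mirror that decision tree; it assumes the Linux-specific SO_DOMAIN/SO_PROTOCOL options and uses made-up error handling.]

#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int check_sockfd(int fd)
{
	int domain, proto;
	socklen_t len;
	struct sockaddr_storage peer;

	len = sizeof(domain);
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) < 0)
		return -ENOTSOCK;               /* not a usable socket */
	if (domain != AF_INET)
		return -EAFNOSUPPORT;

	len = sizeof(proto);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) < 0)
		return -errno;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	len = sizeof(peer);
	if (getpeername(fd, (struct sockaddr *)&peer, &len) == 0)
		return -EISCONN;                /* already connected */

	return 0;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

	printf("fresh udp socket: %d\n", check_sockfd(fd));     /* expect 0 */
	close(fd);
	return 0;
}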
1370/* 1527/*
1371 * Create socket for RPC service. 1528 * Create socket for RPC service.
1372 */ 1529 */
@@ -1393,14 +1550,12 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1393 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) 1550 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
1394 return error; 1551 return error;
1395 1552
1396 if (sin != NULL) { 1553 if (type == SOCK_STREAM)
1397 if (type == SOCK_STREAM) 1554 sock->sk->sk_reuse = 1; /* allow address reuse */
1398 sock->sk->sk_reuse = 1; /* allow address reuse */ 1555 error = kernel_bind(sock, (struct sockaddr *) sin,
1399 error = kernel_bind(sock, (struct sockaddr *) sin, 1556 sizeof(*sin));
1400 sizeof(*sin)); 1557 if (error < 0)
1401 if (error < 0) 1558 goto bummer;
1402 goto bummer;
1403 }
1404 1559
1405 if (protocol == IPPROTO_TCP) { 1560 if (protocol == IPPROTO_TCP) {
1406 if ((error = kernel_listen(sock, 64)) < 0) 1561 if ((error = kernel_listen(sock, 64)) < 0)
@@ -1436,15 +1591,27 @@ svc_delete_socket(struct svc_sock *svsk)
1436 1591
1437 spin_lock_bh(&serv->sv_lock); 1592 spin_lock_bh(&serv->sv_lock);
1438 1593
1439 list_del_init(&svsk->sk_list); 1594 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1440 list_del_init(&svsk->sk_ready); 1595 list_del_init(&svsk->sk_list);
1596 /*
1597 * We used to delete the svc_sock from whichever list
1598 * its sk_ready node was on, but we don't actually
1599 * need to. This is because the only time we're called
1600 * while still attached to a queue, the queue itself
1601 * is about to be destroyed (in svc_destroy).
1602 */
1441 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) 1603 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
1442 if (test_bit(SK_TEMP, &svsk->sk_flags)) 1604 if (test_bit(SK_TEMP, &svsk->sk_flags))
1443 serv->sv_tmpcnt--; 1605 serv->sv_tmpcnt--;
1444 1606
1445 if (!svsk->sk_inuse) { 1607 if (!atomic_read(&svsk->sk_inuse)) {
1446 spin_unlock_bh(&serv->sv_lock); 1608 spin_unlock_bh(&serv->sv_lock);
1447 sock_release(svsk->sk_sock); 1609 if (svsk->sk_sock->file)
1610 sockfd_put(svsk->sk_sock);
1611 else
1612 sock_release(svsk->sk_sock);
1613 if (svsk->sk_info_authunix != NULL)
1614 svcauth_unix_info_release(svsk->sk_info_authunix);
1448 kfree(svsk); 1615 kfree(svsk);
1449 } else { 1616 } else {
1450 spin_unlock_bh(&serv->sv_lock); 1617 spin_unlock_bh(&serv->sv_lock);
@@ -1475,7 +1642,6 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
1475static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1642static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1476{ 1643{
1477 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); 1644 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
1478 struct svc_serv *serv = dreq->owner;
1479 struct svc_sock *svsk; 1645 struct svc_sock *svsk;
1480 1646
1481 if (too_many) { 1647 if (too_many) {
@@ -1486,9 +1652,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1486 dprintk("revisit queued\n"); 1652 dprintk("revisit queued\n");
1487 svsk = dr->svsk; 1653 svsk = dr->svsk;
1488 dr->svsk = NULL; 1654 dr->svsk = NULL;
1489 spin_lock_bh(&serv->sv_lock); 1655 spin_lock_bh(&svsk->sk_defer_lock);
1490 list_add(&dr->handle.recent, &svsk->sk_deferred); 1656 list_add(&dr->handle.recent, &svsk->sk_deferred);
1491 spin_unlock_bh(&serv->sv_lock); 1657 spin_unlock_bh(&svsk->sk_defer_lock);
1492 set_bit(SK_DEFERRED, &svsk->sk_flags); 1658 set_bit(SK_DEFERRED, &svsk->sk_flags);
1493 svc_sock_enqueue(svsk); 1659 svc_sock_enqueue(svsk);
1494 svc_sock_put(svsk); 1660 svc_sock_put(svsk);
@@ -1520,10 +1686,8 @@ svc_defer(struct cache_req *req)
1520 dr->argslen = rqstp->rq_arg.len >> 2; 1686 dr->argslen = rqstp->rq_arg.len >> 2;
1521 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1687 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
1522 } 1688 }
1523 spin_lock_bh(&rqstp->rq_server->sv_lock); 1689 atomic_inc(&rqstp->rq_sock->sk_inuse);
1524 rqstp->rq_sock->sk_inuse++;
1525 dr->svsk = rqstp->rq_sock; 1690 dr->svsk = rqstp->rq_sock;
1526 spin_unlock_bh(&rqstp->rq_server->sv_lock);
1527 1691
1528 dr->handle.revisit = svc_revisit; 1692 dr->handle.revisit = svc_revisit;
1529 return &dr->handle; 1693 return &dr->handle;
@@ -1543,6 +1707,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1543 rqstp->rq_prot = dr->prot; 1707 rqstp->rq_prot = dr->prot;
1544 rqstp->rq_addr = dr->addr; 1708 rqstp->rq_addr = dr->addr;
1545 rqstp->rq_daddr = dr->daddr; 1709 rqstp->rq_daddr = dr->daddr;
1710 rqstp->rq_respages = rqstp->rq_pages;
1546 return dr->argslen<<2; 1711 return dr->argslen<<2;
1547} 1712}
1548 1713
@@ -1550,11 +1715,10 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1550static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) 1715static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1551{ 1716{
1552 struct svc_deferred_req *dr = NULL; 1717 struct svc_deferred_req *dr = NULL;
1553 struct svc_serv *serv = svsk->sk_server;
1554 1718
1555 if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) 1719 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1556 return NULL; 1720 return NULL;
1557 spin_lock_bh(&serv->sv_lock); 1721 spin_lock_bh(&svsk->sk_defer_lock);
1558 clear_bit(SK_DEFERRED, &svsk->sk_flags); 1722 clear_bit(SK_DEFERRED, &svsk->sk_flags);
1559 if (!list_empty(&svsk->sk_deferred)) { 1723 if (!list_empty(&svsk->sk_deferred)) {
1560 dr = list_entry(svsk->sk_deferred.next, 1724 dr = list_entry(svsk->sk_deferred.next,
@@ -1563,6 +1727,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1563 list_del_init(&dr->handle.recent); 1727 list_del_init(&dr->handle.recent);
1564 set_bit(SK_DEFERRED, &svsk->sk_flags); 1728 set_bit(SK_DEFERRED, &svsk->sk_flags);
1565 } 1729 }
1566 spin_unlock_bh(&serv->sv_lock); 1730 spin_unlock_bh(&svsk->sk_defer_lock);
1567 return dr; 1731 return dr;
1568} 1732}
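[Editorial aside, not part of the patch: the svc_defer()/svc_revisit()/svc_deferred_recv() path touched above copies a request's raw XDR words into a deferred record when handling would block on a cache upcall, and later replays those bytes into the receive buffer as if they had just arrived. A toy userspace model with an invented request string.]

#include <stdio.h>
#include <string.h>

struct deferred_req {
	unsigned char args[64];
	size_t argslen;
};

static void defer_request(struct deferred_req *dr, const void *args, size_t len)
{
	dr->argslen = len < sizeof(dr->args) ? len : sizeof(dr->args);
	memcpy(dr->args, args, dr->argslen);    /* stash the raw arguments */
}

static size_t deferred_recv(const struct deferred_req *dr, void *buf, size_t len)
{
	size_t n = dr->argslen < len ? dr->argslen : len;

	memcpy(buf, dr->args, n);               /* replay into the receive buffer */
	return n;
}

int main(void)
{
	struct deferred_req dr;
	const char request[] = "LOOKUP parent=17 name=linux";
	char replay[64];

	defer_request(&dr, request, sizeof(request));
	size_t n = deferred_recv(&dr, replay, sizeof(replay));
	printf("replayed %zu bytes: %s\n", n, replay);
	return 0;
}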
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6ac45103a272..9022eb8b37ed 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -18,8 +18,8 @@
18/* 18/*
19 * XDR functions for basic NFS types 19 * XDR functions for basic NFS types
20 */ 20 */
21u32 * 21__be32 *
22xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj) 22xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
23{ 23{
24 unsigned int quadlen = XDR_QUADLEN(obj->len); 24 unsigned int quadlen = XDR_QUADLEN(obj->len);
25 25
@@ -29,8 +29,8 @@ xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
29 return p + XDR_QUADLEN(obj->len); 29 return p + XDR_QUADLEN(obj->len);
30} 30}
31 31
32u32 * 32__be32 *
33xdr_decode_netobj(u32 *p, struct xdr_netobj *obj) 33xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
34{ 34{
35 unsigned int len; 35 unsigned int len;
36 36
@@ -55,7 +55,7 @@ xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
55 * Returns the updated current XDR buffer position 55 * Returns the updated current XDR buffer position
56 * 56 *
57 */ 57 */
58u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes) 58__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
59{ 59{
60 if (likely(nbytes != 0)) { 60 if (likely(nbytes != 0)) {
61 unsigned int quadlen = XDR_QUADLEN(nbytes); 61 unsigned int quadlen = XDR_QUADLEN(nbytes);
@@ -79,21 +79,21 @@ EXPORT_SYMBOL(xdr_encode_opaque_fixed);
79 * 79 *
80 * Returns the updated current XDR buffer position 80 * Returns the updated current XDR buffer position
81 */ 81 */
82u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes) 82__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
83{ 83{
84 *p++ = htonl(nbytes); 84 *p++ = htonl(nbytes);
85 return xdr_encode_opaque_fixed(p, ptr, nbytes); 85 return xdr_encode_opaque_fixed(p, ptr, nbytes);
86} 86}
87EXPORT_SYMBOL(xdr_encode_opaque); 87EXPORT_SYMBOL(xdr_encode_opaque);
88 88
89u32 * 89__be32 *
90xdr_encode_string(u32 *p, const char *string) 90xdr_encode_string(__be32 *p, const char *string)
91{ 91{
92 return xdr_encode_array(p, string, strlen(string)); 92 return xdr_encode_array(p, string, strlen(string));
93} 93}
94 94
95u32 * 95__be32 *
96xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen) 96xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
97{ 97{
98 unsigned int len; 98 unsigned int len;
99 99
@@ -432,7 +432,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
432 * of the buffer length, and takes care of adjusting the kvec 432 * of the buffer length, and takes care of adjusting the kvec
433 * length for us. 433 * length for us.
434 */ 434 */
435void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 435void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
436{ 436{
437 struct kvec *iov = buf->head; 437 struct kvec *iov = buf->head;
438 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; 438 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
@@ -440,8 +440,8 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
440 BUG_ON(scratch_len < 0); 440 BUG_ON(scratch_len < 0);
441 xdr->buf = buf; 441 xdr->buf = buf;
442 xdr->iov = iov; 442 xdr->iov = iov;
443 xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len); 443 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
444 xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len); 444 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
445 BUG_ON(iov->iov_len > scratch_len); 445 BUG_ON(iov->iov_len > scratch_len);
446 446
447 if (p != xdr->p && p != NULL) { 447 if (p != xdr->p && p != NULL) {
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(xdr_init_encode);
465 * bytes of data. If so, update the total xdr_buf length, and 465 * bytes of data. If so, update the total xdr_buf length, and
466 * adjust the length of the current kvec. 466 * adjust the length of the current kvec.
467 */ 467 */
468uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) 468__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
469{ 469{
470 uint32_t *p = xdr->p; 470 __be32 *p = xdr->p;
471 uint32_t *q; 471 __be32 *q;
472 472
473 /* align nbytes on the next 32-bit boundary */ 473 /* align nbytes on the next 32-bit boundary */
474 nbytes += 3; 474 nbytes += 3;
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(xdr_write_pages);
524 * @buf: pointer to XDR buffer from which to decode data 524 * @buf: pointer to XDR buffer from which to decode data
525 * @p: current pointer inside XDR buffer 525 * @p: current pointer inside XDR buffer
526 */ 526 */
527void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 527void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
528{ 528{
529 struct kvec *iov = buf->head; 529 struct kvec *iov = buf->head;
530 unsigned int len = iov->iov_len; 530 unsigned int len = iov->iov_len;
@@ -534,7 +534,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
534 xdr->buf = buf; 534 xdr->buf = buf;
535 xdr->iov = iov; 535 xdr->iov = iov;
536 xdr->p = p; 536 xdr->p = p;
537 xdr->end = (uint32_t *)((char *)iov->iov_base + len); 537 xdr->end = (__be32 *)((char *)iov->iov_base + len);
538} 538}
539EXPORT_SYMBOL(xdr_init_decode); 539EXPORT_SYMBOL(xdr_init_decode);
540 540
@@ -548,10 +548,10 @@ EXPORT_SYMBOL(xdr_init_decode);
548 * If so return the current pointer, then update the current 548 * If so return the current pointer, then update the current
549 * pointer position. 549 * pointer position.
550 */ 550 */
551uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 551__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
552{ 552{
553 uint32_t *p = xdr->p; 553 __be32 *p = xdr->p;
554 uint32_t *q = p + XDR_QUADLEN(nbytes); 554 __be32 *q = p + XDR_QUADLEN(nbytes);
555 555
556 if (unlikely(q > xdr->end || q < p)) 556 if (unlikely(q > xdr->end || q < p))
557 return NULL; 557 return NULL;
@@ -599,8 +599,8 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
599 * Position current pointer at beginning of tail, and 599 * Position current pointer at beginning of tail, and
600 * set remaining message length. 600 * set remaining message length.
601 */ 601 */
602 xdr->p = (uint32_t *)((char *)iov->iov_base + padding); 602 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
603 xdr->end = (uint32_t *)((char *)iov->iov_base + end); 603 xdr->end = (__be32 *)((char *)iov->iov_base + end);
604} 604}
605EXPORT_SYMBOL(xdr_read_pages); 605EXPORT_SYMBOL(xdr_read_pages);
606 606
@@ -624,8 +624,8 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
624 */ 624 */
625 if (len > PAGE_CACHE_SIZE - xdr->buf->page_base) 625 if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
626 len = PAGE_CACHE_SIZE - xdr->buf->page_base; 626 len = PAGE_CACHE_SIZE - xdr->buf->page_base;
627 xdr->p = (uint32_t *)(kaddr + xdr->buf->page_base); 627 xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
628 xdr->end = (uint32_t *)((char *)xdr->p + len); 628 xdr->end = (__be32 *)((char *)xdr->p + len);
629} 629}
630EXPORT_SYMBOL(xdr_enter_page); 630EXPORT_SYMBOL(xdr_enter_page);
631 631
@@ -743,7 +743,7 @@ out:
743int 743int
744xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) 744xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
745{ 745{
746 u32 raw; 746 __be32 raw;
747 int status; 747 int status;
748 748
749 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); 749 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
@@ -756,7 +756,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
756int 756int
757xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) 757xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
758{ 758{
759 u32 raw = htonl(obj); 759 __be32 raw = htonl(obj);
760 760
761 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); 761 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
762} 762}
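
The xdr.c changes are a sparse annotation pass: on-the-wire 32-bit words become __be32 instead of u32, so the static checker can flag any use that skips htonl()/ntohl(). The standalone sketch below mimics that convention; the __bitwise/__force macros and the encode/decode helpers are simplified stand-ins, not the kernel's definitions. Under a normal compile the annotations vanish, under sparse a missing byte-swap is reported:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise                 /* plain compile: annotations disappear */
#define __force
#endif

typedef uint32_t u32;
typedef u32 __bitwise be32;       /* stands in for the kernel's __be32 */

/* Encode a host-order word into wire (big-endian) order. */
static be32 *encode_be32(be32 *p, u32 val)
{
    *p++ = (__force be32)htonl(val);
    return p;
}

/* Decode a wire-order word back into host order. */
static u32 decode_be32(const be32 *p)
{
    return ntohl((__force u32)*p);
}

int main(void)
{
    be32 buf[1];

    encode_be32(buf, 1995);
    printf("round trip: %u\n", (unsigned)decode_be32(buf));
    return 0;
}
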
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1f786f68729d..80857470dc11 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -594,7 +594,7 @@ static void xprt_connect_status(struct rpc_task *task)
594 * @xid: RPC XID of incoming reply 594 * @xid: RPC XID of incoming reply
595 * 595 *
596 */ 596 */
597struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) 597struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
598{ 598{
599 struct list_head *pos; 599 struct list_head *pos;
600 600
@@ -801,7 +801,7 @@ void xprt_reserve(struct rpc_task *task)
801 spin_unlock(&xprt->reserve_lock); 801 spin_unlock(&xprt->reserve_lock);
802} 802}
803 803
804static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) 804static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
805{ 805{
806 return xprt->xid++; 806 return xprt->xid++;
807} 807}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9b62923a9c06..757fc91ef25d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -548,7 +548,8 @@ static void xs_udp_data_ready(struct sock *sk, int len)
548 struct rpc_rqst *rovr; 548 struct rpc_rqst *rovr;
549 struct sk_buff *skb; 549 struct sk_buff *skb;
550 int err, repsize, copied; 550 int err, repsize, copied;
551 u32 _xid, *xp; 551 u32 _xid;
552 __be32 *xp;
552 553
553 read_lock(&sk->sk_callback_lock); 554 read_lock(&sk->sk_callback_lock);
554 dprintk("RPC: xs_udp_data_ready...\n"); 555 dprintk("RPC: xs_udp_data_ready...\n");
@@ -1365,7 +1366,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1365 if (xprt->slot == NULL) 1366 if (xprt->slot == NULL)
1366 return -ENOMEM; 1367 return -ENOMEM;
1367 1368
1368 if (ntohs(addr->sin_port != 0)) 1369 if (ntohs(addr->sin_port) != 0)
1369 xprt_set_bound(xprt); 1370 xprt_set_bound(xprt);
1370 xprt->port = xs_get_random_port(); 1371 xprt->port = xs_get_random_port();
1371 1372
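
The one-character xprtsock.c fix moves a parenthesis so the byte-swap applies to the port number rather than to the boolean comparison. The truth value happens to come out the same, but the corrected form says what is meant and keeps the endian usage consistent. A quick userspace illustration, with the variable invented for the demo:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint16_t sin_port = htons(631);   /* a nonzero port in network order */

    /* Old form: byte-swaps the comparison result (0 or 1). */
    printf("ntohs(sin_port != 0) -> %u\n", (unsigned)ntohs(sin_port != 0));

    /* Fixed form: swaps the port, then compares, which is what was meant. */
    printf("ntohs(sin_port) != 0 -> %u\n", (unsigned)(ntohs(sin_port) != 0));
    return 0;
}
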
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 75a5968c2139..39744a33bd36 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -2,7 +2,7 @@
2 * net/tipc/bearer.c: TIPC bearer code 2 * net/tipc/bearer.c: TIPC bearer code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -191,14 +191,14 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
191 if ((i < media_count) && (m_ptr->addr2str != NULL)) { 191 if ((i < media_count) && (m_ptr->addr2str != NULL)) {
192 char addr_str[MAX_ADDR_STR]; 192 char addr_str[MAX_ADDR_STR];
193 193
194 tipc_printf(pb, "%s(%s) ", m_ptr->name, 194 tipc_printf(pb, "%s(%s)", m_ptr->name,
195 m_ptr->addr2str(a, addr_str, sizeof(addr_str))); 195 m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
196 } else { 196 } else {
197 unchar *addr = (unchar *)&a->dev_addr; 197 unchar *addr = (unchar *)&a->dev_addr;
198 198
199 tipc_printf(pb, "UNKNOWN(%u):", media_type); 199 tipc_printf(pb, "UNKNOWN(%u)", media_type);
200 for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) { 200 for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
201 tipc_printf(pb, "%02x ", addr[i]); 201 tipc_printf(pb, "-%02x", addr[i]);
202 } 202 }
203 } 203 }
204} 204}
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 285e1bc2d880..ed1351ed05e1 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
2 * net/tipc/config.c: TIPC configuration management code 2 * net/tipc/config.c: TIPC configuration management code
3 * 3 *
4 * Copyright (c) 2002-2006, Ericsson AB 4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -613,7 +613,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
613 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); 613 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
614 break; 614 break;
615 default: 615 default:
616 rep_tlv_buf = NULL; 616 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
617 " (unknown command)");
617 break; 618 break;
618 } 619 }
619 620
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 0539a8362858..6f5b7ee31180 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -57,7 +57,7 @@ void tipc_socket_stop(void);
57int tipc_netlink_start(void); 57int tipc_netlink_start(void);
58void tipc_netlink_stop(void); 58void tipc_netlink_stop(void);
59 59
60#define TIPC_MOD_VER "1.6.1" 60#define TIPC_MOD_VER "1.6.2"
61 61
62#ifndef CONFIG_TIPC_ZONES 62#ifndef CONFIG_TIPC_ZONES
63#define CONFIG_TIPC_ZONES 3 63#define CONFIG_TIPC_ZONES 3
@@ -90,7 +90,7 @@ int tipc_random;
90atomic_t tipc_user_count = ATOMIC_INIT(0); 90atomic_t tipc_user_count = ATOMIC_INIT(0);
91 91
92const char tipc_alphabet[] = 92const char tipc_alphabet[] =
93 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"; 93 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
94 94
95/* configurable TIPC parameters */ 95/* configurable TIPC parameters */
96 96
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 762aac2572be..4638947c2326 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -65,7 +65,7 @@
65#define assert(i) BUG_ON(!(i)) 65#define assert(i) BUG_ON(!(i))
66 66
67struct tipc_msg; 67struct tipc_msg;
68extern struct print_buf *TIPC_CONS, *TIPC_LOG; 68extern struct print_buf *TIPC_NULL, *TIPC_CONS, *TIPC_LOG;
69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *); 69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*); 70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
71void tipc_printf(struct print_buf *, const char *fmt, ...); 71void tipc_printf(struct print_buf *, const char *fmt, ...);
@@ -83,9 +83,9 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
83#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg) 83#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) 84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
85 85
86#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) 86#define dbg(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0) 87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT != TIPC_NULL) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
88#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 88#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
89 89
90 90
91/* 91/*
@@ -94,11 +94,11 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
94 * here, or on a per .c file basis, by redefining these symbols. The following 94 * here, or on a per .c file basis, by redefining these symbols. The following
95 * print buffer options are available: 95 * print buffer options are available:
96 * 96 *
97 * NULL : Output to null print buffer (i.e. print nowhere) 97 * TIPC_NULL : null buffer (i.e. print nowhere)
98 * TIPC_CONS : Output to system console 98 * TIPC_CONS : system console
99 * TIPC_LOG : Output to TIPC log buffer 99 * TIPC_LOG : TIPC log buffer
100 * &buf : Output to user-defined buffer (struct print_buf *) 100 * &buf : user-defined buffer (struct print_buf *)
101 * TIPC_TEE(&buf_a,&buf_b) : Output to two print buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG) ) 101 * TIPC_TEE(&buf_a,&buf_b) : list of buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG))
102 */ 102 */
103 103
104#ifndef TIPC_OUTPUT 104#ifndef TIPC_OUTPUT
@@ -106,7 +106,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
106#endif 106#endif
107 107
108#ifndef DBG_OUTPUT 108#ifndef DBG_OUTPUT
109#define DBG_OUTPUT NULL 109#define DBG_OUTPUT TIPC_NULL
110#endif 110#endif
111 111
112#else 112#else
@@ -136,7 +136,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
136#define TIPC_OUTPUT TIPC_CONS 136#define TIPC_OUTPUT TIPC_CONS
137 137
138#undef DBG_OUTPUT 138#undef DBG_OUTPUT
139#define DBG_OUTPUT NULL 139#define DBG_OUTPUT TIPC_NULL
140 140
141#endif 141#endif
142 142
@@ -275,11 +275,15 @@ static inline void k_term_timer(struct timer_list *timer)
275/* 275/*
276 * TIPC message buffer code 276 * TIPC message buffer code
277 * 277 *
278 * TIPC message buffer headroom leaves room for 14 byte Ethernet header, 278 * TIPC message buffer headroom reserves space for a link-level header
279 * (in case the message is sent off-node),
279 * while ensuring TIPC header is word aligned for quicker access 280 * while ensuring TIPC header is word aligned for quicker access
281 *
282 * The largest header currently supported is 18 bytes, which is used when
283 * the standard 14 byte Ethernet header has 4 added bytes for VLAN info
280 */ 284 */
281 285
282#define BUF_HEADROOM 16u 286#define BUF_HEADROOM 20u
283 287
284struct tipc_skb_cb { 288struct tipc_skb_cb {
285 void *handle; 289 void *handle;
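
The BUF_HEADROOM change in core.h follows directly from the new comment: the worst-case link-level header is a 14-byte Ethernet header plus a 4-byte VLAN tag, and rounding 18 up to a 4-byte multiple keeps the TIPC header word aligned. A trivial check of that arithmetic, with macro names made up for the example:

#include <stdio.h>

#define ETH_HDR_LEN  14u              /* standard Ethernet header */
#define VLAN_TAG_LEN  4u              /* 802.1Q tag, when present */
#define ALIGN4(x)    (((x) + 3u) & ~3u)

int main(void)
{
    unsigned int worst_case = ETH_HDR_LEN + VLAN_TAG_LEN;   /* 18 */
    unsigned int headroom   = ALIGN4(worst_case);            /* 20 */

    printf("largest link header: %u bytes\n", worst_case);
    printf("headroom to reserve: %u bytes\n", headroom);
    return 0;
}
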
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 55130655e1ed..d8af4c28695d 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/dbg.c: TIPC print buffer routines for debuggign 2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -38,11 +38,12 @@
38#include "config.h" 38#include "config.h"
39#include "dbg.h" 39#include "dbg.h"
40 40
41#define MAX_STRING 512 41static char print_string[TIPC_PB_MAX_STR];
42
43static char print_string[MAX_STRING];
44static DEFINE_SPINLOCK(print_lock); 42static DEFINE_SPINLOCK(print_lock);
45 43
44static struct print_buf null_buf = { NULL, 0, NULL, NULL };
45struct print_buf *TIPC_NULL = &null_buf;
46
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 47static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *TIPC_CONS = &cons_buf; 48struct print_buf *TIPC_CONS = &cons_buf;
48 49
@@ -62,68 +63,83 @@ struct print_buf *TIPC_LOG = &log_buf;
62/* 63/*
63 * Locking policy when using print buffers. 64 * Locking policy when using print buffers.
64 * 65 *
65 * 1) Routines of the form printbuf_XXX() rely on the caller to prevent 66 * The following routines use 'print_lock' for protection:
66 * simultaneous use of the print buffer(s) being manipulated. 67 * 1) tipc_printf() - to protect its print buffer(s) and 'print_string'
67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of 68 * 2) TIPC_TEE() - to protect its print buffer(s)
68 * 'print_string' and to protect its print buffer(s). 69 * 3) tipc_dump() - to protect its print buffer(s) and 'print_string'
69 * 3) TIPC_TEE() uses 'print_lock' to protect its print buffer(s). 70 * 4) tipc_log_XXX() - to protect TIPC_LOG
70 * 4) Routines of the form log_XXX() uses 'print_lock' to protect TIPC_LOG. 71 *
72 * All routines of the form tipc_printbuf_XXX() rely on the caller to prevent
73 * simultaneous use of the print buffer(s) being manipulated.
71 */ 74 */
72 75
73/** 76/**
74 * tipc_printbuf_init - initialize print buffer to empty 77 * tipc_printbuf_init - initialize print buffer to empty
78 * @pb: pointer to print buffer structure
79 * @raw: pointer to character array used by print buffer
80 * @size: size of character array
81 *
82 * Makes the print buffer a null device that discards anything written to it
83 * if the character array is too small (or absent).
75 */ 84 */
76 85
77void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 sz) 86void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
78{ 87{
79 if (!pb || !raw || (sz < (MAX_STRING + 1))) 88 pb->buf = raw;
80 return; 89 pb->crs = raw;
81 90 pb->size = size;
82 pb->crs = pb->buf = raw;
83 pb->size = sz;
84 pb->next = NULL; 91 pb->next = NULL;
85 pb->buf[0] = 0; 92
86 pb->buf[sz-1] = ~0; 93 if (size < TIPC_PB_MIN_SIZE) {
94 pb->buf = NULL;
95 } else if (raw) {
96 pb->buf[0] = 0;
97 pb->buf[size-1] = ~0;
98 }
87} 99}
88 100
89/** 101/**
90 * tipc_printbuf_reset - reinitialize print buffer to empty state 102 * tipc_printbuf_reset - reinitialize print buffer to empty state
103 * @pb: pointer to print buffer structure
91 */ 104 */
92 105
93void tipc_printbuf_reset(struct print_buf *pb) 106void tipc_printbuf_reset(struct print_buf *pb)
94{ 107{
95 if (pb && pb->buf) 108 tipc_printbuf_init(pb, pb->buf, pb->size);
96 tipc_printbuf_init(pb, pb->buf, pb->size);
97} 109}
98 110
99/** 111/**
100 * tipc_printbuf_empty - test if print buffer is in empty state 112 * tipc_printbuf_empty - test if print buffer is in empty state
113 * @pb: pointer to print buffer structure
114 *
115 * Returns non-zero if print buffer is empty.
101 */ 116 */
102 117
103int tipc_printbuf_empty(struct print_buf *pb) 118int tipc_printbuf_empty(struct print_buf *pb)
104{ 119{
105 return (!pb || !pb->buf || (pb->crs == pb->buf)); 120 return (!pb->buf || (pb->crs == pb->buf));
106} 121}
107 122
108/** 123/**
109 * tipc_printbuf_validate - check for print buffer overflow 124 * tipc_printbuf_validate - check for print buffer overflow
125 * @pb: pointer to print buffer structure
110 * 126 *
111 * Verifies that a print buffer has captured all data written to it. 127 * Verifies that a print buffer has captured all data written to it.
112 * If data has been lost, linearize buffer and prepend an error message 128 * If data has been lost, linearize buffer and prepend an error message
113 * 129 *
114 * Returns length of print buffer data string (including trailing NULL) 130 * Returns length of print buffer data string (including trailing NUL)
115 */ 131 */
116 132
117int tipc_printbuf_validate(struct print_buf *pb) 133int tipc_printbuf_validate(struct print_buf *pb)
118{ 134{
119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n"; 135 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
120 char *cp_buf; 136 char *cp_buf;
121 struct print_buf cb; 137 struct print_buf cb;
122 138
123 if (!pb || !pb->buf) 139 if (!pb->buf)
124 return 0; 140 return 0;
125 141
126 if (pb->buf[pb->size - 1] == '\0') { 142 if (pb->buf[pb->size - 1] == 0) {
127 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 143 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
128 if (cp_buf != NULL){ 144 if (cp_buf != NULL){
129 tipc_printbuf_init(&cb, cp_buf, pb->size); 145 tipc_printbuf_init(&cb, cp_buf, pb->size);
@@ -141,6 +157,8 @@ int tipc_printbuf_validate(struct print_buf *pb)
141 157
142/** 158/**
143 * tipc_printbuf_move - move print buffer contents to another print buffer 159 * tipc_printbuf_move - move print buffer contents to another print buffer
160 * @pb_to: pointer to destination print buffer structure
161 * @pb_from: pointer to source print buffer structure
144 * 162 *
145 * Current contents of destination print buffer (if any) are discarded. 163 * Current contents of destination print buffer (if any) are discarded.
146 * Source print buffer becomes empty if a successful move occurs. 164 * Source print buffer becomes empty if a successful move occurs.
@@ -152,21 +170,22 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
152 170
153 /* Handle the cases where contents can't be moved */ 171 /* Handle the cases where contents can't be moved */
154 172
155 if (!pb_to || !pb_to->buf) 173 if (!pb_to->buf)
156 return; 174 return;
157 175
158 if (!pb_from || !pb_from->buf) { 176 if (!pb_from->buf) {
159 tipc_printbuf_reset(pb_to); 177 tipc_printbuf_reset(pb_to);
160 return; 178 return;
161 } 179 }
162 180
163 if (pb_to->size < pb_from->size) { 181 if (pb_to->size < pb_from->size) {
164 tipc_printbuf_reset(pb_to); 182 tipc_printbuf_reset(pb_to);
165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***"); 183 tipc_printf(pb_to, "*** PRINT BUFFER MOVE ERROR ***");
166 return; 184 return;
167 } 185 }
168 186
169 /* Copy data from char after cursor to end (if used) */ 187 /* Copy data from char after cursor to end (if used) */
188
170 len = pb_from->buf + pb_from->size - pb_from->crs - 2; 189 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
171 if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) { 190 if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
172 strcpy(pb_to->buf, pb_from->crs + 1); 191 strcpy(pb_to->buf, pb_from->crs + 1);
@@ -175,6 +194,7 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
175 pb_to->crs = pb_to->buf; 194 pb_to->crs = pb_to->buf;
176 195
177 /* Copy data from start to cursor (always) */ 196 /* Copy data from start to cursor (always) */
197
178 len = pb_from->crs - pb_from->buf; 198 len = pb_from->crs - pb_from->buf;
179 strcpy(pb_to->crs, pb_from->buf); 199 strcpy(pb_to->crs, pb_from->buf);
180 pb_to->crs += len; 200 pb_to->crs += len;
@@ -184,6 +204,8 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
184 204
185/** 205/**
186 * tipc_printf - append formatted output to print buffer chain 206 * tipc_printf - append formatted output to print buffer chain
207 * @pb: pointer to chain of print buffers (may be NULL)
208 * @fmt: formatted info to be printed
187 */ 209 */
188 210
189void tipc_printf(struct print_buf *pb, const char *fmt, ...) 211void tipc_printf(struct print_buf *pb, const char *fmt, ...)
@@ -195,8 +217,8 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
195 217
196 spin_lock_bh(&print_lock); 218 spin_lock_bh(&print_lock);
197 FORMAT(print_string, chars_to_add, fmt); 219 FORMAT(print_string, chars_to_add, fmt);
198 if (chars_to_add >= MAX_STRING) 220 if (chars_to_add >= TIPC_PB_MAX_STR)
199 strcpy(print_string, "*** STRING TOO LONG ***"); 221 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
200 222
201 while (pb) { 223 while (pb) {
202 if (pb == TIPC_CONS) 224 if (pb == TIPC_CONS)
@@ -206,6 +228,10 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
206 if (chars_to_add <= chars_left) { 228 if (chars_to_add <= chars_left) {
207 strcpy(pb->crs, print_string); 229 strcpy(pb->crs, print_string);
208 pb->crs += chars_to_add; 230 pb->crs += chars_to_add;
231 } else if (chars_to_add >= (pb->size - 1)) {
232 strcpy(pb->buf, print_string + chars_to_add + 1
233 - pb->size);
234 pb->crs = pb->buf + pb->size - 1;
209 } else { 235 } else {
210 strcpy(pb->buf, print_string + chars_left); 236 strcpy(pb->buf, print_string + chars_left);
211 save_char = print_string[chars_left]; 237 save_char = print_string[chars_left];
@@ -224,6 +250,10 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
224 250
225/** 251/**
226 * TIPC_TEE - perform next output operation on both print buffers 252 * TIPC_TEE - perform next output operation on both print buffers
253 * @b0: pointer to chain of print buffers (may be NULL)
254 * @b1: pointer to print buffer to add to chain
255 *
256 * Returns pointer to print buffer chain.
227 */ 257 */
228 258
229struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1) 259struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
@@ -232,8 +262,6 @@ struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
232 262
233 if (!b0 || (b0 == b1)) 263 if (!b0 || (b0 == b1))
234 return b1; 264 return b1;
235 if (!b1)
236 return b0;
237 265
238 spin_lock_bh(&print_lock); 266 spin_lock_bh(&print_lock);
239 while (pb->next) { 267 while (pb->next) {
@@ -256,7 +284,7 @@ static void print_to_console(char *crs, int len)
256 int rest = len; 284 int rest = len;
257 285
258 while (rest > 0) { 286 while (rest > 0) {
259 int sz = rest < MAX_STRING ? rest : MAX_STRING; 287 int sz = rest < TIPC_PB_MAX_STR ? rest : TIPC_PB_MAX_STR;
260 char c = crs[sz]; 288 char c = crs[sz];
261 289
262 crs[sz] = 0; 290 crs[sz] = 0;
@@ -275,36 +303,48 @@ static void printbuf_dump(struct print_buf *pb)
275{ 303{
276 int len; 304 int len;
277 305
306 if (!pb->buf) {
307 printk("*** PRINT BUFFER NOT ALLOCATED ***");
308 return;
309 }
310
278 /* Dump print buffer from char after cursor to end (if used) */ 311 /* Dump print buffer from char after cursor to end (if used) */
312
279 len = pb->buf + pb->size - pb->crs - 2; 313 len = pb->buf + pb->size - pb->crs - 2;
280 if ((pb->buf[pb->size - 1] == 0) && (len > 0)) 314 if ((pb->buf[pb->size - 1] == 0) && (len > 0))
281 print_to_console(pb->crs + 1, len); 315 print_to_console(pb->crs + 1, len);
282 316
283 /* Dump print buffer from start to cursor (always) */ 317 /* Dump print buffer from start to cursor (always) */
318
284 len = pb->crs - pb->buf; 319 len = pb->crs - pb->buf;
285 print_to_console(pb->buf, len); 320 print_to_console(pb->buf, len);
286} 321}
287 322
288/** 323/**
289 * tipc_dump - dump non-console print buffer(s) to console 324 * tipc_dump - dump non-console print buffer(s) to console
325 * @pb: pointer to chain of print buffers
290 */ 326 */
291 327
292void tipc_dump(struct print_buf *pb, const char *fmt, ...) 328void tipc_dump(struct print_buf *pb, const char *fmt, ...)
293{ 329{
330 struct print_buf *pb_next;
294 int len; 331 int len;
295 332
296 spin_lock_bh(&print_lock); 333 spin_lock_bh(&print_lock);
297 FORMAT(TIPC_CONS->buf, len, fmt); 334 FORMAT(print_string, len, fmt);
298 printk(TIPC_CONS->buf); 335 printk(print_string);
299 336
300 for (; pb; pb = pb->next) { 337 for (; pb; pb = pb->next) {
301 if (pb == TIPC_CONS) 338 if (pb != TIPC_CONS) {
302 continue; 339 printk("\n---- Start of %s log dump ----\n\n",
303 printk("\n---- Start of dump,%s log ----\n\n", 340 (pb == TIPC_LOG) ? "global" : "local");
304 (pb == TIPC_LOG) ? "global" : "local"); 341 printbuf_dump(pb);
305 printbuf_dump(pb); 342 tipc_printbuf_reset(pb);
306 tipc_printbuf_reset(pb); 343 printk("\n---- End of dump ----\n");
307 printk("\n-------- End of dump --------\n"); 344 }
345 pb_next = pb->next;
346 pb->next = NULL;
347 pb = pb_next;
308 } 348 }
309 spin_unlock_bh(&print_lock); 349 spin_unlock_bh(&print_lock);
310} 350}
@@ -324,7 +364,8 @@ void tipc_log_stop(void)
324} 364}
325 365
326/** 366/**
327 * tipc_log_reinit - set TIPC log print buffer to specified size 367 * tipc_log_reinit - (re)initialize TIPC log print buffer
368 * @log_size: print buffer size to use
328 */ 369 */
329 370
330void tipc_log_reinit(int log_size) 371void tipc_log_reinit(int log_size)
@@ -332,10 +373,11 @@ void tipc_log_reinit(int log_size)
332 tipc_log_stop(); 373 tipc_log_stop();
333 374
334 if (log_size) { 375 if (log_size) {
335 if (log_size <= MAX_STRING) 376 if (log_size < TIPC_PB_MIN_SIZE)
336 log_size = MAX_STRING + 1; 377 log_size = TIPC_PB_MIN_SIZE;
337 spin_lock_bh(&print_lock); 378 spin_lock_bh(&print_lock);
338 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), log_size); 379 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
380 log_size);
339 spin_unlock_bh(&print_lock); 381 spin_unlock_bh(&print_lock);
340 } 382 }
341} 383}
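
The reworked dbg.c treats an undersized or missing backing array as a null device and lets tipc_printf() wrap when a message no longer fits. A much-simplified userspace sketch of that behaviour; the field names mirror TIPC's, but the wrap handling is condensed and not the kernel's exact algorithm:

#include <stdio.h>
#include <string.h>

#define PB_MIN_SIZE 64                 /* counterpart of TIPC_PB_MIN_SIZE */

struct print_buf {
    char *buf;                         /* NULL means "discard everything" */
    size_t size;
    char *crs;                         /* next free position */
};

static void pb_init(struct print_buf *pb, char *raw, size_t size)
{
    pb->buf = (raw && size >= PB_MIN_SIZE) ? raw : NULL;
    pb->size = size;
    pb->crs = pb->buf;
    if (pb->buf) {
        pb->buf[0] = 0;
        pb->buf[size - 1] = (char)~0;  /* sentinel: becomes 0 only if we fill up */
    }
}

static void pb_puts(struct print_buf *pb, const char *s)
{
    size_t add = strlen(s) + 1;
    size_t left;

    if (!pb->buf || add > pb->size)
        return;                        /* null device, or hopelessly large */
    left = pb->size - (size_t)(pb->crs - pb->buf);
    if (add <= left) {
        strcpy(pb->crs, s);
        pb->crs += add - 1;
    } else {                           /* out of room: wrap to the front */
        strcpy(pb->buf, s);
        pb->crs = pb->buf + add - 1;
    }
}

int main(void)
{
    char raw[PB_MIN_SIZE];
    struct print_buf pb;

    pb_init(&pb, raw, sizeof(raw));
    pb_puts(&pb, "link <1.1.1:eth0> is up");
    printf("%s\n", pb.buf);
    return 0;
}
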
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index 227f050d2a52..467c0bc78a79 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -2,7 +2,7 @@
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines 2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 * 3 *
4 * Copyright (c) 1997-2006, Ericsson AB 4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,14 @@
37#ifndef _TIPC_DBG_H 37#ifndef _TIPC_DBG_H
38#define _TIPC_DBG_H 38#define _TIPC_DBG_H
39 39
40/**
41 * struct print_buf - TIPC print buffer structure
42 * @buf: pointer to character array containing print buffer contents
43 * @size: size of character array
44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @next: used to link print buffers when printing to more than one at a time
46 */
47
40struct print_buf { 48struct print_buf {
41 char *buf; 49 char *buf;
42 u32 size; 50 u32 size;
@@ -44,7 +52,10 @@ struct print_buf {
44 struct print_buf *next; 52 struct print_buf *next;
45}; 53};
46 54
47void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 sz); 55#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
56#define TIPC_PB_MAX_STR 512 /* max printable string (with trailing NUL) */
57
58void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
48void tipc_printbuf_reset(struct print_buf *pb); 59void tipc_printbuf_reset(struct print_buf *pb);
49int tipc_printbuf_empty(struct print_buf *pb); 60int tipc_printbuf_empty(struct print_buf *pb);
50int tipc_printbuf_validate(struct print_buf *pb); 61int tipc_printbuf_validate(struct print_buf *pb);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ee94de92ae99..3b0cd12f37da 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -132,6 +132,28 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
132} 132}
133 133
134/** 134/**
135 * disc_dupl_alert - issue node address duplication alert
136 * @b_ptr: pointer to bearer detecting duplication
137 * @node_addr: duplicated node address
138 * @media_addr: media address advertised by duplicated node
139 */
140
141static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
142 struct tipc_media_addr *media_addr)
143{
144 char node_addr_str[16];
145 char media_addr_str[64];
146 struct print_buf pb;
147
148 addr_string_fill(node_addr_str, node_addr);
149 tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
150 tipc_media_addr_printf(&pb, media_addr);
151 tipc_printbuf_validate(&pb);
152 warn("Duplicate %s using %s seen on <%s>\n",
153 node_addr_str, media_addr_str, b_ptr->publ.name);
154}
155
156/**
135 * tipc_disc_recv_msg - handle incoming link setup message (request or response) 157 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
136 * @buf: buffer containing message 158 * @buf: buffer containing message
137 */ 159 */
@@ -157,8 +179,11 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
157 return; 179 return;
158 if (!tipc_addr_node_valid(orig)) 180 if (!tipc_addr_node_valid(orig))
159 return; 181 return;
160 if (orig == tipc_own_addr) 182 if (orig == tipc_own_addr) {
183 if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
184 disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
161 return; 185 return;
186 }
162 if (!in_scope(dest, tipc_own_addr)) 187 if (!in_scope(dest, tipc_own_addr))
163 return; 188 return;
164 if (is_slave(tipc_own_addr) && is_slave(orig)) 189 if (is_slave(tipc_own_addr) && is_slave(orig))
@@ -170,7 +195,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
170 struct sk_buff *rbuf; 195 struct sk_buff *rbuf;
171 struct tipc_media_addr *addr; 196 struct tipc_media_addr *addr;
172 struct node *n_ptr = tipc_node_find(orig); 197 struct node *n_ptr = tipc_node_find(orig);
173 int link_up; 198 int link_fully_up;
199
174 dbg(" in own cluster\n"); 200 dbg(" in own cluster\n");
175 if (n_ptr == NULL) { 201 if (n_ptr == NULL) {
176 n_ptr = tipc_node_create(orig); 202 n_ptr = tipc_node_create(orig);
@@ -190,14 +216,19 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
190 } 216 }
191 addr = &link->media_addr; 217 addr = &link->media_addr;
192 if (memcmp(addr, &media_addr, sizeof(*addr))) { 218 if (memcmp(addr, &media_addr, sizeof(*addr))) {
219 if (tipc_link_is_up(link) || (!link->started)) {
220 disc_dupl_alert(b_ptr, orig, &media_addr);
221 spin_unlock_bh(&n_ptr->lock);
222 return;
223 }
193 warn("Resetting link <%s>, peer interface address changed\n", 224 warn("Resetting link <%s>, peer interface address changed\n",
194 link->name); 225 link->name);
195 memcpy(addr, &media_addr, sizeof(*addr)); 226 memcpy(addr, &media_addr, sizeof(*addr));
196 tipc_link_reset(link); 227 tipc_link_reset(link);
197 } 228 }
198 link_up = tipc_link_is_up(link); 229 link_fully_up = (link->state == WORKING_WORKING);
199 spin_unlock_bh(&n_ptr->lock); 230 spin_unlock_bh(&n_ptr->lock);
200 if ((type == DSC_RESP_MSG) || link_up) 231 if ((type == DSC_RESP_MSG) || link_fully_up)
201 return; 232 return;
202 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr); 233 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
203 if (rbuf != NULL) { 234 if (rbuf != NULL) {
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 693f02eca6d6..1bb983c8130b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -132,7 +132,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
132 * allow the output from multiple links to be intermixed. For this reason 132 * allow the output from multiple links to be intermixed. For this reason
133 * routines of the form "dbg_link_XXX()" have been created that will capture 133 * routines of the form "dbg_link_XXX()" have been created that will capture
134 * debug info into a link's personal print buffer, which can then be dumped 134 * debug info into a link's personal print buffer, which can then be dumped
135 * into the TIPC system log (LOG) upon request. 135 * into the TIPC system log (TIPC_LOG) upon request.
136 * 136 *
137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size 137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0, 138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
@@ -141,7 +141,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
141 * when there is only a single link in the system being debugged. 141 * when there is only a single link in the system being debugged.
142 * 142 *
143 * Notes: 143 * Notes:
144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes) 144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least TIPC_PB_MIN_SIZE
145 * - "l_ptr" must be valid when using dbg_link_XXX() macros 145 * - "l_ptr" must be valid when using dbg_link_XXX() macros
146 */ 146 */
147 147
@@ -159,13 +159,13 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
159 159
160static void dbg_print_link(struct link *l_ptr, const char *str) 160static void dbg_print_link(struct link *l_ptr, const char *str)
161{ 161{
162 if (DBG_OUTPUT) 162 if (DBG_OUTPUT != TIPC_NULL)
163 link_print(l_ptr, DBG_OUTPUT, str); 163 link_print(l_ptr, DBG_OUTPUT, str);
164} 164}
165 165
166static void dbg_print_buf_chain(struct sk_buff *root_buf) 166static void dbg_print_buf_chain(struct sk_buff *root_buf)
167{ 167{
168 if (DBG_OUTPUT) { 168 if (DBG_OUTPUT != TIPC_NULL) {
169 struct sk_buff *buf = root_buf; 169 struct sk_buff *buf = root_buf;
170 170
171 while (buf) { 171 while (buf) {
@@ -1666,8 +1666,9 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1666 char addr_string[16]; 1666 char addr_string[16];
1667 1667
1668 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg)); 1668 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
1669 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle); 1669 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %lu\n",
1670 1670 (unsigned long) TIPC_SKB_CB(buf)->handle);
1671
1671 n_ptr = l_ptr->owner->next; 1672 n_ptr = l_ptr->owner->next;
1672 tipc_node_lock(n_ptr); 1673 tipc_node_lock(n_ptr);
1673 1674
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index f0b063bcc2a9..03bd659c43ca 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -122,7 +122,7 @@ void tipc_named_publish(struct publication *publ)
122 struct sk_buff *buf; 122 struct sk_buff *buf;
123 struct distr_item *item; 123 struct distr_item *item;
124 124
125 list_add(&publ->local_list, &publ_root); 125 list_add_tail(&publ->local_list, &publ_root);
126 publ_cnt++; 126 publ_cnt++;
127 127
128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
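
The name_distr.c change swaps list_add() for list_add_tail() so locally published names are kept, and later re-sent, in publication order rather than newest first. A small demo of why the insertion point matters, using a plain singly linked list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct publ {
    int name;
    struct publ *next;
};

/* Append at the tail so traversal sees publications oldest first. */
static void add_tail(struct publ **head, struct publ *p)
{
    while (*head)
        head = &(*head)->next;
    p->next = NULL;
    *head = p;
}

int main(void)
{
    struct publ *root = NULL;
    int names[] = { 100, 200, 300 };
    int i;

    for (i = 0; i < 3; i++) {
        struct publ *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->name = names[i];
        add_tail(&root, p);            /* head insertion would reverse this */
    }
    while (root) {
        struct publ *next = root->next;

        printf("publication %d\n", root->name);   /* 100, 200, 300 */
        free(root);
        root = next;
    }
    return 0;
}
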
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fc6d09630ccd..886bda5e88db 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -648,7 +648,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
648 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 648 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
649 " (network address)"); 649 " (network address)");
650 650
651 if (!tipc_nodes) 651 if (tipc_mode != TIPC_NET_MODE)
652 return tipc_cfg_reply_none(); 652 return tipc_cfg_reply_none();
653 653
654 /* Get space for all unicast links + multicast link */ 654 /* Get space for all unicast links + multicast link */
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b9c8c6b9e94f..c1a1a76759b5 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -505,8 +505,13 @@ static void port_timeout(unsigned long ref)
505 struct port *p_ptr = tipc_port_lock(ref); 505 struct port *p_ptr = tipc_port_lock(ref);
506 struct sk_buff *buf = NULL; 506 struct sk_buff *buf = NULL;
507 507
508 if (!p_ptr || !p_ptr->publ.connected) 508 if (!p_ptr)
509 return;
510
511 if (!p_ptr->publ.connected) {
512 tipc_port_unlock(p_ptr);
509 return; 513 return;
514 }
510 515
511 /* Last probe answered ? */ 516 /* Last probe answered ? */
512 if (p_ptr->probing_state == PROBING) { 517 if (p_ptr->probing_state == PROBING) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 32d778448a00..2a6a5a6b4c12 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2,7 +2,7 @@
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -629,6 +629,9 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
629 return -ENOTCONN; 629 return -ENOTCONN;
630 } 630 }
631 631
632 if (unlikely(m->msg_name))
633 return -EISCONN;
634
632 /* 635 /*
633 * Send each iovec entry using one or more messages 636 * Send each iovec entry using one or more messages
634 * 637 *
@@ -641,6 +644,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
641 curr_iovlen = m->msg_iovlen; 644 curr_iovlen = m->msg_iovlen;
642 my_msg.msg_iov = &my_iov; 645 my_msg.msg_iov = &my_iov;
643 my_msg.msg_iovlen = 1; 646 my_msg.msg_iovlen = 1;
647 my_msg.msg_flags = m->msg_flags;
648 my_msg.msg_name = NULL;
644 bytes_sent = 0; 649 bytes_sent = 0;
645 650
646 while (curr_iovlen--) { 651 while (curr_iovlen--) {
@@ -941,7 +946,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
941 int sz_to_copy; 946 int sz_to_copy;
942 int sz_copied = 0; 947 int sz_copied = 0;
943 int needed; 948 int needed;
944 char *crs = m->msg_iov->iov_base; 949 char __user *crs = m->msg_iov->iov_base;
945 unsigned char *buf_crs; 950 unsigned char *buf_crs;
946 u32 err; 951 u32 err;
947 int res; 952 int res;
@@ -1203,7 +1208,8 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1203 atomic_inc(&tipc_queue_size); 1208 atomic_inc(&tipc_queue_size);
1204 skb_queue_tail(&sock->sk->sk_receive_queue, buf); 1209 skb_queue_tail(&sock->sk->sk_receive_queue, buf);
1205 1210
1206 wake_up_interruptible(sock->sk->sk_sleep); 1211 if (waitqueue_active(sock->sk->sk_sleep))
1212 wake_up_interruptible(sock->sk->sk_sleep);
1207 return TIPC_OK; 1213 return TIPC_OK;
1208} 1214}
1209 1215
@@ -1218,7 +1224,8 @@ static void wakeupdispatch(struct tipc_port *tport)
1218{ 1224{
1219 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle; 1225 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1220 1226
1221 wake_up_interruptible(tsock->sk.sk_sleep); 1227 if (waitqueue_active(tsock->sk.sk_sleep))
1228 wake_up_interruptible(tsock->sk.sk_sleep);
1222} 1229}
1223 1230
1224/** 1231/**
@@ -1496,7 +1503,7 @@ static int setsockopt(struct socket *sock,
1496 return -ENOPROTOOPT; 1503 return -ENOPROTOOPT;
1497 if (ol < sizeof(value)) 1504 if (ol < sizeof(value))
1498 return -EINVAL; 1505 return -EINVAL;
1499 if ((res = get_user(value, (u32 *)ov))) 1506 if ((res = get_user(value, (u32 __user *)ov)))
1500 return res; 1507 return res;
1501 1508
1502 if (down_interruptible(&tsock->sem)) 1509 if (down_interruptible(&tsock->sem))
@@ -1541,7 +1548,7 @@ static int setsockopt(struct socket *sock,
1541 */ 1548 */
1542 1549
1543static int getsockopt(struct socket *sock, 1550static int getsockopt(struct socket *sock,
1544 int lvl, int opt, char __user *ov, int *ol) 1551 int lvl, int opt, char __user *ov, int __user *ol)
1545{ 1552{
1546 struct tipc_sock *tsock = tipc_sk(sock->sk); 1553 struct tipc_sock *tsock = tipc_sk(sock->sk);
1547 int len; 1554 int len;
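
Both wakeup paths in socket.c now test waitqueue_active() before calling wake_up_interruptible(), skipping the wakeup machinery when no task is actually sleeping on the socket. A rough userspace analogue of the idea with a waiter counter and a condition variable; this shows the concept only, the kernel helper has its own memory-ordering rules:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int waiters;                    /* stands in for waitqueue_active() */
static int data_ready;

static void deliver(void)
{
    pthread_mutex_lock(&lock);
    data_ready = 1;
    if (waiters)                       /* skip the wakeup when nobody sleeps */
        pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

static void *receiver(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    waiters++;
    while (!data_ready)
        pthread_cond_wait(&cond, &lock);
    waiters--;
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, receiver, NULL);
    deliver();
    pthread_join(t, NULL);
    printf("receiver woke, data_ready=%d\n", data_ready);
    return 0;
}
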
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index c51600ba5f4a..7a918f12a5df 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -155,7 +155,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
155 sub->seq.upper, found_lower, found_upper); 155 sub->seq.upper, found_lower, found_upper);
156 if (!tipc_subscr_overlap(sub, found_lower, found_upper)) 156 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 157 return;
158 if (!must && (sub->filter != TIPC_SUB_PORTS)) 158 if (!must && !(sub->filter & TIPC_SUB_PORTS))
159 return; 159 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); 160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
161} 161}
@@ -176,6 +176,13 @@ static void subscr_timeout(struct subscription *sub)
176 if (subscriber == NULL) 176 if (subscriber == NULL)
177 return; 177 return;
178 178
179 /* Validate timeout (in case subscription is being cancelled) */
180
181 if (sub->timeout == TIPC_WAIT_FOREVER) {
182 tipc_ref_unlock(subscriber_ref);
183 return;
184 }
185
179 /* Unlink subscription from name table */ 186 /* Unlink subscription from name table */
180 187
181 tipc_nametbl_unsubscribe(sub); 188 tipc_nametbl_unsubscribe(sub);
@@ -199,6 +206,20 @@ static void subscr_timeout(struct subscription *sub)
199} 206}
200 207
201/** 208/**
209 * subscr_del - delete a subscription within a subscription list
210 *
211 * Called with subscriber locked.
212 */
213
214static void subscr_del(struct subscription *sub)
215{
216 tipc_nametbl_unsubscribe(sub);
217 list_del(&sub->subscription_list);
218 kfree(sub);
219 atomic_dec(&topsrv.subscription_count);
220}
221
222/**
202 * subscr_terminate - terminate communication with a subscriber 223 * subscr_terminate - terminate communication with a subscriber
203 * 224 *
204 * Called with subscriber locked. Routine must temporarily release this lock 225 * Called with subscriber locked. Routine must temporarily release this lock
@@ -227,12 +248,9 @@ static void subscr_terminate(struct subscriber *subscriber)
227 k_cancel_timer(&sub->timer); 248 k_cancel_timer(&sub->timer);
228 k_term_timer(&sub->timer); 249 k_term_timer(&sub->timer);
229 } 250 }
230 tipc_nametbl_unsubscribe(sub); 251 dbg("Term: Removing sub %u,%u,%u from subscriber %x list\n",
231 list_del(&sub->subscription_list);
232 dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); 252 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
234 kfree(sub); 253 subscr_del(sub);
235 atomic_dec(&topsrv.subscription_count);
236 } 254 }
237 255
238 /* Sever connection to subscriber */ 256 /* Sever connection to subscriber */
@@ -253,6 +271,49 @@ static void subscr_terminate(struct subscriber *subscriber)
253} 271}
254 272
255/** 273/**
274 * subscr_cancel - handle subscription cancellation request
275 *
276 * Called with subscriber locked. Routine must temporarily release this lock
277 * to enable the subscription timeout routine to finish without deadlocking;
278 * the lock is then reclaimed to allow caller to release it upon return.
279 *
280 * Note that fields of 's' use subscriber's endianness!
281 */
282
283static void subscr_cancel(struct tipc_subscr *s,
284 struct subscriber *subscriber)
285{
286 struct subscription *sub;
287 struct subscription *sub_temp;
288 int found = 0;
289
290 /* Find first matching subscription, exit if not found */
291
292 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
293 subscription_list) {
294 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
295 found = 1;
296 break;
297 }
298 }
299 if (!found)
300 return;
301
302 /* Cancel subscription timer (if used), then delete subscription */
303
304 if (sub->timeout != TIPC_WAIT_FOREVER) {
305 sub->timeout = TIPC_WAIT_FOREVER;
306 spin_unlock_bh(subscriber->lock);
307 k_cancel_timer(&sub->timer);
308 k_term_timer(&sub->timer);
309 spin_lock_bh(subscriber->lock);
310 }
311 dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
312 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
313 subscr_del(sub);
314}
315
316/**
256 * subscr_subscribe - create subscription for subscriber 317 * subscr_subscribe - create subscription for subscriber
257 * 318 *
258 * Called with subscriber locked 319 * Called with subscriber locked
@@ -263,6 +324,21 @@ static void subscr_subscribe(struct tipc_subscr *s,
263{ 324{
264 struct subscription *sub; 325 struct subscription *sub;
265 326
327 /* Determine/update subscriber's endianness */
328
329 if (s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE))
330 subscriber->swap = 0;
331 else
332 subscriber->swap = 1;
333
334 /* Detect & process a subscription cancellation request */
335
336 if (s->filter & htohl(TIPC_SUB_CANCEL, subscriber->swap)) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, subscriber->swap);
338 subscr_cancel(s, subscriber);
339 return;
340 }
341
266 /* Refuse subscription if global limit exceeded */ 342 /* Refuse subscription if global limit exceeded */
267 343
268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 344 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
@@ -281,13 +357,6 @@ static void subscr_subscribe(struct tipc_subscr *s,
281 return; 357 return;
282 } 358 }
283 359
284 /* Determine/update subscriber's endianness */
285
286 if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
287 subscriber->swap = 0;
288 else
289 subscriber->swap = 1;
290
291 /* Initialize subscription object */ 360 /* Initialize subscription object */
292 361
293 memset(sub, 0, sizeof(*sub)); 362 memset(sub, 0, sizeof(*sub));
@@ -296,8 +365,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
296 sub->seq.upper = htohl(s->seq.upper, subscriber->swap); 365 sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
297 sub->timeout = htohl(s->timeout, subscriber->swap); 366 sub->timeout = htohl(s->timeout, subscriber->swap);
298 sub->filter = htohl(s->filter, subscriber->swap); 367 sub->filter = htohl(s->filter, subscriber->swap);
299 if ((((sub->filter != TIPC_SUB_PORTS) 368 if ((!(sub->filter & TIPC_SUB_PORTS)
300 && (sub->filter != TIPC_SUB_SERVICE))) 369 == !(sub->filter & TIPC_SUB_SERVICE))
301 || (sub->seq.lower > sub->seq.upper)) { 370 || (sub->seq.lower > sub->seq.upper)) {
302 warn("Subscription rejected, illegal request\n"); 371 warn("Subscription rejected, illegal request\n");
303 kfree(sub); 372 kfree(sub);
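
subscr.c now derives the subscriber's byte order from the filter field up front and recognises a cancellation bit, expressed in that byte order, before treating the request as a new subscription. A standalone sketch of that decoding step; the constant values and the htohl() helper are modelled on TIPC's but should be read as illustrative:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define SUB_PORTS   0x01               /* illustrative filter bits */
#define SUB_SERVICE 0x02
#define SUB_CANCEL  0x04

/* "host to host": swap only when the peer's byte order differs from ours. */
static uint32_t htohl(uint32_t in, int swap)
{
    return swap ? ntohl(in) : in;
}

int main(void)
{
    /* A filter as it would appear from a big-endian subscriber. */
    uint32_t filter = htonl(SUB_PORTS | SUB_CANCEL);
    int swap;

    /* If neither known filter bit is visible natively, the fields must be
     * byte-swapped relative to this host. */
    swap = (filter & (SUB_PORTS | SUB_SERVICE)) ? 0 : 1;

    if (filter & htohl(SUB_CANCEL, swap)) {
        filter &= ~htohl(SUB_CANCEL, swap);
        printf("cancellation request, filter %#x\n",
               (unsigned)htohl(filter, swap));
    } else {
        printf("new subscription, filter %#x\n",
               (unsigned)htohl(filter, swap));
    }
    return 0;
}
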
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index d3abb0b7dc62..d401dc8f05ed 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -41,27 +41,28 @@ static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t
41 return (h ^ (h >> 16)) & hmask; 41 return (h ^ (h >> 16)) & hmask;
42} 42}
43 43
44static inline unsigned __xfrm_src_hash(xfrm_address_t *saddr, 44static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
45 xfrm_address_t *saddr,
45 unsigned short family, 46 unsigned short family,
46 unsigned int hmask) 47 unsigned int hmask)
47{ 48{
48 unsigned int h = family; 49 unsigned int h = family;
49 switch (family) { 50 switch (family) {
50 case AF_INET: 51 case AF_INET:
51 h ^= __xfrm4_addr_hash(saddr); 52 h ^= __xfrm4_daddr_saddr_hash(daddr, saddr);
52 break; 53 break;
53 case AF_INET6: 54 case AF_INET6:
54 h ^= __xfrm6_addr_hash(saddr); 55 h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
55 break; 56 break;
56 }; 57 };
57 return (h ^ (h >> 16)) & hmask; 58 return (h ^ (h >> 16)) & hmask;
58} 59}
59 60
60static inline unsigned int 61static inline unsigned int
61__xfrm_spi_hash(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family, 62__xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family,
62 unsigned int hmask) 63 unsigned int hmask)
63{ 64{
64 unsigned int h = spi ^ proto; 65 unsigned int h = (__force u32)spi ^ proto;
65 switch (family) { 66 switch (family) {
66 case AF_INET: 67 case AF_INET:
67 h ^= __xfrm4_addr_hash(daddr); 68 h ^= __xfrm4_addr_hash(daddr);
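
The xfrm_hash.h change feeds both the destination and source address into the source hash, so states that share one endpoint no longer collapse into a single bucket. A toy illustration of the effect; the mixing function below is a stand-in, not the kernel's hash:

#include <stdio.h>
#include <stdint.h>

/* Stand-in mixer: both endpoints contribute to the bucket choice. */
static unsigned int addr_pair_hash(uint32_t daddr, uint32_t saddr,
                                   unsigned int hmask)
{
    unsigned int h = daddr ^ saddr;

    return (h ^ (h >> 16)) & hmask;
}

int main(void)
{
    uint32_t local = 0x0a000001;       /* one local address, many peers */
    unsigned int hmask = 0xff;
    uint32_t d;

    for (d = 1; d <= 3; d++)
        printf("peer .%u -> bucket %u\n", (unsigned)d,
               addr_pair_hash(0xc0a80000u | d, local, hmask));
    /* Hashing the source address alone would have put all three states
     * into the same bucket. */
    return 0;
}
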
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index dfc90bb1cf1f..e8198a2c785d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(secpath_dup);
46 46
47/* Fetch spi and seq from ipsec header */ 47/* Fetch spi and seq from ipsec header */
48 48
49int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq) 49int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
50{ 50{
51 int offset, offset_seq; 51 int offset, offset_seq;
52 52
@@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
62 case IPPROTO_COMP: 62 case IPPROTO_COMP:
63 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) 63 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
64 return -EINVAL; 64 return -EINVAL;
65 *spi = htonl(ntohs(*(u16*)(skb->h.raw + 2))); 65 *spi = htonl(ntohs(*(__be16*)(skb->h.raw + 2)));
66 *seq = 0; 66 *seq = 0;
67 return 0; 67 return 0;
68 default: 68 default:
@@ -72,8 +72,8 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
72 if (!pskb_may_pull(skb, 16)) 72 if (!pskb_may_pull(skb, 16))
73 return -EINVAL; 73 return -EINVAL;
74 74
75 *spi = *(u32*)(skb->h.raw + offset); 75 *spi = *(__be32*)(skb->h.raw + offset);
76 *seq = *(u32*)(skb->h.raw + offset_seq); 76 *seq = *(__be32*)(skb->h.raw + offset_seq);
77 return 0; 77 return 0;
78} 78}
79EXPORT_SYMBOL(xfrm_parse_spi); 79EXPORT_SYMBOL(xfrm_parse_spi);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b6e2e79d7261..7736b23c3f03 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -778,8 +778,9 @@ void xfrm_policy_flush(u8 type)
778 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 778 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
779 struct xfrm_policy *pol; 779 struct xfrm_policy *pol;
780 struct hlist_node *entry; 780 struct hlist_node *entry;
781 int i; 781 int i, killed;
782 782
783 killed = 0;
783 again1: 784 again1:
784 hlist_for_each_entry(pol, entry, 785 hlist_for_each_entry(pol, entry,
785 &xfrm_policy_inexact[dir], bydst) { 786 &xfrm_policy_inexact[dir], bydst) {
@@ -790,6 +791,7 @@ void xfrm_policy_flush(u8 type)
790 write_unlock_bh(&xfrm_policy_lock); 791 write_unlock_bh(&xfrm_policy_lock);
791 792
792 xfrm_policy_kill(pol); 793 xfrm_policy_kill(pol);
794 killed++;
793 795
794 write_lock_bh(&xfrm_policy_lock); 796 write_lock_bh(&xfrm_policy_lock);
795 goto again1; 797 goto again1;
@@ -807,13 +809,14 @@ void xfrm_policy_flush(u8 type)
807 write_unlock_bh(&xfrm_policy_lock); 809 write_unlock_bh(&xfrm_policy_lock);
808 810
809 xfrm_policy_kill(pol); 811 xfrm_policy_kill(pol);
812 killed++;
810 813
811 write_lock_bh(&xfrm_policy_lock); 814 write_lock_bh(&xfrm_policy_lock);
812 goto again2; 815 goto again2;
813 } 816 }
814 } 817 }
815 818
816 xfrm_policy_count[dir] = 0; 819 xfrm_policy_count[dir] -= killed;
817 } 820 }
818 atomic_inc(&flow_cache_genid); 821 atomic_inc(&flow_cache_genid);
819 write_unlock_bh(&xfrm_policy_lock); 822 write_unlock_bh(&xfrm_policy_lock);
@@ -880,30 +883,32 @@ out:
880} 883}
881EXPORT_SYMBOL(xfrm_policy_walk); 884EXPORT_SYMBOL(xfrm_policy_walk);
882 885
883/* Find policy to apply to this flow. */ 886/*
884 887 * Find policy to apply to this flow.
888 *
889 * Returns 0 if policy found, else an -errno.
890 */
885static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl, 891static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
886 u8 type, u16 family, int dir) 892 u8 type, u16 family, int dir)
887{ 893{
888 struct xfrm_selector *sel = &pol->selector; 894 struct xfrm_selector *sel = &pol->selector;
889 int match; 895 int match, ret = -ESRCH;
890 896
891 if (pol->family != family || 897 if (pol->family != family ||
892 pol->type != type) 898 pol->type != type)
893 return 0; 899 return ret;
894 900
895 match = xfrm_selector_match(sel, fl, family); 901 match = xfrm_selector_match(sel, fl, family);
896 if (match) { 902 if (match)
897 if (!security_xfrm_policy_lookup(pol, fl->secid, dir)) 903 ret = security_xfrm_policy_lookup(pol, fl->secid, dir);
898 return 1;
899 }
900 904
901 return 0; 905 return ret;
902} 906}
903 907
904static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl, 908static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
905 u16 family, u8 dir) 909 u16 family, u8 dir)
906{ 910{
911 int err;
907 struct xfrm_policy *pol, *ret; 912 struct xfrm_policy *pol, *ret;
908 xfrm_address_t *daddr, *saddr; 913 xfrm_address_t *daddr, *saddr;
909 struct hlist_node *entry; 914 struct hlist_node *entry;
@@ -919,7 +924,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
919 chain = policy_hash_direct(daddr, saddr, family, dir); 924 chain = policy_hash_direct(daddr, saddr, family, dir);
920 ret = NULL; 925 ret = NULL;
921 hlist_for_each_entry(pol, entry, chain, bydst) { 926 hlist_for_each_entry(pol, entry, chain, bydst) {
922 if (xfrm_policy_match(pol, fl, type, family, dir)) { 927 err = xfrm_policy_match(pol, fl, type, family, dir);
928 if (err) {
929 if (err == -ESRCH)
930 continue;
931 else {
932 ret = ERR_PTR(err);
933 goto fail;
934 }
935 } else {
923 ret = pol; 936 ret = pol;
924 priority = ret->priority; 937 priority = ret->priority;
925 break; 938 break;
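
After this hunk, xfrm_policy_match() distinguishes "selector does not match" (-ESRCH) from a real failure in security_xfrm_policy_lookup() (any other negative errno) and returns 0 only for a usable match; the by-type lookup keeps scanning on -ESRCH and aborts on anything else. A hedged sketch of that calling convention as a single scan, with placeholder selector and security-check types rather than the kernel ones:

#include <errno.h>
#include <stdio.h>

struct selector {
    int family;
    int port;
};

/* Placeholder for security_xfrm_policy_lookup(): 0 means the LSM
 * allows this policy, a negative errno means a hard failure. */
static int security_check(const struct selector *sel)
{
    (void)sel;
    return 0;
}

/* 0: usable match, -ESRCH: selector does not match, other -errno: error. */
static int policy_match(const struct selector *sel, int family, int port)
{
    if (sel->family != family || sel->port != port)
        return -ESRCH;
    return security_check(sel);
}

static const struct selector *lookup(const struct selector *tbl, int n,
                                     int family, int port, int *errp)
{
    *errp = 0;
    for (int i = 0; i < n; i++) {
        int err = policy_match(&tbl[i], family, port);

        if (err == -ESRCH)
            continue;           /* not this one, keep scanning */
        if (err < 0) {
            *errp = err;        /* hard failure: abort the walk */
            return NULL;
        }
        return &tbl[i];         /* first usable match wins */
    }
    return NULL;                /* scanned everything, nothing matched */
}

int main(void)
{
    struct selector table[] = { { 2 /* AF_INET */, 80 }, { 2, 443 } };
    int err;
    const struct selector *sel = lookup(table, 2, 2, 443, &err);

    if (sel)
        printf("matched port %d\n", sel->port);
    else
        printf("no match, err=%d\n", err);
    return 0;
}

The point of the convention is that a hard error stops the scan instead of being misread as "no policy", which is what the old boolean return conflated.
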
@@ -927,36 +940,53 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
927 } 940 }
928 chain = &xfrm_policy_inexact[dir]; 941 chain = &xfrm_policy_inexact[dir];
929 hlist_for_each_entry(pol, entry, chain, bydst) { 942 hlist_for_each_entry(pol, entry, chain, bydst) {
930 if (xfrm_policy_match(pol, fl, type, family, dir) && 943 err = xfrm_policy_match(pol, fl, type, family, dir);
931 pol->priority < priority) { 944 if (err) {
945 if (err == -ESRCH)
946 continue;
947 else {
948 ret = ERR_PTR(err);
949 goto fail;
950 }
951 } else if (pol->priority < priority) {
932 ret = pol; 952 ret = pol;
933 break; 953 break;
934 } 954 }
935 } 955 }
936 if (ret) 956 if (ret)
937 xfrm_pol_hold(ret); 957 xfrm_pol_hold(ret);
958fail:
938 read_unlock_bh(&xfrm_policy_lock); 959 read_unlock_bh(&xfrm_policy_lock);
939 960
940 return ret; 961 return ret;
941} 962}
942 963
943static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir, 964static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
944 void **objp, atomic_t **obj_refp) 965 void **objp, atomic_t **obj_refp)
945{ 966{
946 struct xfrm_policy *pol; 967 struct xfrm_policy *pol;
968 int err = 0;
947 969
948#ifdef CONFIG_XFRM_SUB_POLICY 970#ifdef CONFIG_XFRM_SUB_POLICY
949 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir); 971 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
950 if (pol) 972 if (IS_ERR(pol)) {
973 err = PTR_ERR(pol);
974 pol = NULL;
975 }
976 if (pol || err)
951 goto end; 977 goto end;
952#endif 978#endif
953 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir); 979 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
954 980 if (IS_ERR(pol)) {
981 err = PTR_ERR(pol);
982 pol = NULL;
983 }
955#ifdef CONFIG_XFRM_SUB_POLICY 984#ifdef CONFIG_XFRM_SUB_POLICY
956end: 985end:
957#endif 986#endif
958 if ((*objp = (void *) pol) != NULL) 987 if ((*objp = (void *) pol) != NULL)
959 *obj_refp = &pol->refcnt; 988 *obj_refp = &pol->refcnt;
989 return err;
960} 990}
961 991
962static inline int policy_to_flow_dir(int dir) 992static inline int policy_to_flow_dir(int dir)
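
With this change xfrm_policy_lookup_bytype() can return ERR_PTR(err) instead of a policy, and xfrm_policy_lookup() splits that back into a NULL object plus an error code for the flow cache; the callers in xfrm_lookup() and __xfrm_policy_check() in the hunks below then test IS_ERR() and bail out. ERR_PTR()/IS_ERR()/PTR_ERR() (include/linux/err.h) encode a small negative errno in a pointer value that can never be a valid kernel address. A simplified user-space rendition of that encoding, with stand-in helpers rather than the real ones:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
static inline void *ERR_PTR(long err)
{
    return (void *)err;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
    /* the topmost 4095 pointer values are reserved for errnos */
    return (uintptr_t)ptr >= (uintptr_t)-4095;
}

struct policy { int id; };

/* Returns a policy, NULL (no policy), or ERR_PTR(-errno). */
static struct policy *policy_lookup(int key)
{
    static struct policy found = { .id = 1 };

    if (key < 0)
        return ERR_PTR(-EINVAL);   /* hard error */
    if (key == 0)
        return NULL;               /* nothing matched */
    return &found;
}

int main(void)
{
    struct policy *pol = policy_lookup(-1);

    if (IS_ERR(pol))
        printf("lookup failed: %ld\n", PTR_ERR(pol));
    else if (!pol)
        printf("no policy\n");
    else
        printf("policy %d\n", pol->id);
    return 0;
}

The last page of the address space is never a valid object address, so a pointer value in that range is unambiguously an encoded errno rather than a real policy.
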
@@ -986,12 +1016,16 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
986 sk->sk_family); 1016 sk->sk_family);
987 int err = 0; 1017 int err = 0;
988 1018
989 if (match) 1019 if (match) {
990 err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir)); 1020 err = security_xfrm_policy_lookup(pol, fl->secid,
991 1021 policy_to_flow_dir(dir));
992 if (match && !err) 1022 if (!err)
993 xfrm_pol_hold(pol); 1023 xfrm_pol_hold(pol);
994 else 1024 else if (err == -ESRCH)
1025 pol = NULL;
1026 else
1027 pol = ERR_PTR(err);
1028 } else
995 pol = NULL; 1029 pol = NULL;
996 } 1030 }
997 read_unlock_bh(&xfrm_policy_lock); 1031 read_unlock_bh(&xfrm_policy_lock);
@@ -1283,8 +1317,11 @@ restart:
1283 pol_dead = 0; 1317 pol_dead = 0;
1284 xfrm_nr = 0; 1318 xfrm_nr = 0;
1285 1319
1286 if (sk && sk->sk_policy[1]) 1320 if (sk && sk->sk_policy[1]) {
1287 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1321 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1322 if (IS_ERR(policy))
1323 return PTR_ERR(policy);
1324 }
1288 1325
1289 if (!policy) { 1326 if (!policy) {
1290 /* To accelerate a bit... */ 1327 /* To accelerate a bit... */
@@ -1294,6 +1331,8 @@ restart:
1294 1331
1295 policy = flow_cache_lookup(fl, dst_orig->ops->family, 1332 policy = flow_cache_lookup(fl, dst_orig->ops->family,
1296 dir, xfrm_policy_lookup); 1333 dir, xfrm_policy_lookup);
1334 if (IS_ERR(policy))
1335 return PTR_ERR(policy);
1297 } 1336 }
1298 1337
1299 if (!policy) 1338 if (!policy)
@@ -1340,6 +1379,10 @@ restart:
1340 fl, family, 1379 fl, family,
1341 XFRM_POLICY_OUT); 1380 XFRM_POLICY_OUT);
1342 if (pols[1]) { 1381 if (pols[1]) {
1382 if (IS_ERR(pols[1])) {
1383 err = PTR_ERR(pols[1]);
1384 goto error;
1385 }
1343 if (pols[1]->action == XFRM_POLICY_BLOCK) { 1386 if (pols[1]->action == XFRM_POLICY_BLOCK) {
1344 err = -EPERM; 1387 err = -EPERM;
1345 goto error; 1388 goto error;
@@ -1571,13 +1614,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1571 } 1614 }
1572 1615
1573 pol = NULL; 1616 pol = NULL;
1574 if (sk && sk->sk_policy[dir]) 1617 if (sk && sk->sk_policy[dir]) {
1575 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 1618 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
1619 if (IS_ERR(pol))
1620 return 0;
1621 }
1576 1622
1577 if (!pol) 1623 if (!pol)
1578 pol = flow_cache_lookup(&fl, family, fl_dir, 1624 pol = flow_cache_lookup(&fl, family, fl_dir,
1579 xfrm_policy_lookup); 1625 xfrm_policy_lookup);
1580 1626
1627 if (IS_ERR(pol))
1628 return 0;
1629
1581 if (!pol) { 1630 if (!pol) {
1582 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 1631 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
1583 xfrm_secpath_reject(xerr_idx, skb, &fl); 1632 xfrm_secpath_reject(xerr_idx, skb, &fl);
@@ -1596,6 +1645,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1596 &fl, family, 1645 &fl, family,
1597 XFRM_POLICY_IN); 1646 XFRM_POLICY_IN);
1598 if (pols[1]) { 1647 if (pols[1]) {
1648 if (IS_ERR(pols[1]))
1649 return 0;
1599 pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec; 1650 pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
1600 npols ++; 1651 npols ++;
1601 } 1652 }
@@ -1703,7 +1754,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
1703 1754
1704static int stale_bundle(struct dst_entry *dst) 1755static int stale_bundle(struct dst_entry *dst)
1705{ 1756{
1706 return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0); 1757 return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
1707} 1758}
1708 1759
1709void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 1760void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -1825,7 +1876,8 @@ EXPORT_SYMBOL(xfrm_init_pmtu);
1825 * still valid. 1876 * still valid.
1826 */ 1877 */
1827 1878
1828int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict) 1879int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
1880 struct flowi *fl, int family, int strict)
1829{ 1881{
1830 struct dst_entry *dst = &first->u.dst; 1882 struct dst_entry *dst = &first->u.dst;
1831 struct xfrm_dst *last; 1883 struct xfrm_dst *last;
@@ -1842,7 +1894,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int str
1842 1894
1843 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) 1895 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
1844 return 0; 1896 return 0;
1845 if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm)) 1897 if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
1846 return 0; 1898 return 0;
1847 if (dst->xfrm->km.state != XFRM_STATE_VALID) 1899 if (dst->xfrm->km.state != XFRM_STATE_VALID)
1848 return 0; 1900 return 0;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9f63edd39346..84bbf8474f3e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -63,14 +63,15 @@ static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
63 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask); 63 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
64} 64}
65 65
66static inline unsigned int xfrm_src_hash(xfrm_address_t *addr, 66static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
67 xfrm_address_t *saddr,
67 unsigned short family) 68 unsigned short family)
68{ 69{
69 return __xfrm_src_hash(addr, family, xfrm_state_hmask); 70 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
70} 71}
71 72
72static inline unsigned int 73static inline unsigned int
73xfrm_spi_hash(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family) 74xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
74{ 75{
75 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask); 76 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
76} 77}
@@ -92,13 +93,17 @@ static void xfrm_hash_transfer(struct hlist_head *list,
92 nhashmask); 93 nhashmask);
93 hlist_add_head(&x->bydst, ndsttable+h); 94 hlist_add_head(&x->bydst, ndsttable+h);
94 95
95 h = __xfrm_src_hash(&x->props.saddr, x->props.family, 96 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
97 x->props.family,
96 nhashmask); 98 nhashmask);
97 hlist_add_head(&x->bysrc, nsrctable+h); 99 hlist_add_head(&x->bysrc, nsrctable+h);
98 100
99 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, 101 if (x->id.spi) {
100 x->props.family, nhashmask); 102 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
101 hlist_add_head(&x->byspi, nspitable+h); 103 x->id.proto, x->props.family,
104 nhashmask);
105 hlist_add_head(&x->byspi, nspitable+h);
106 }
102 } 107 }
103} 108}
104 109
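
These state-hash hunks change the keying rather than the algorithm: the by-source table is now hashed over both the destination and the source address, and a state is only linked into the by-SPI table when it actually has an SPI. A toy sketch of deriving a bucket index from both endpoints; the xor-and-fold below merely stands in for the kernel's real address hash, and the IPv4-only addr_t is an assumption of the example:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t addr_t;   /* toy xfrm_address_t: IPv4 only */

/* Bucket index derived from both endpoints of the state. */
static unsigned int src_hash(addr_t daddr, addr_t saddr, unsigned int hmask)
{
    uint32_t h = daddr ^ saddr;   /* mix both addresses */

    h ^= h >> 16;                 /* fold the high bits down */
    h ^= h >> 8;
    return h & hmask;             /* hmask is (bucket count - 1) */
}

int main(void)
{
    unsigned int hmask = 0xf;     /* 16 buckets */

    printf("bucket %u\n",
           src_hash(0xc0a80001 /* 192.168.0.1 */,
                    0xc0a80002 /* 192.168.0.2 */, hmask));
    return 0;
}
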
@@ -421,7 +426,7 @@ xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
421 return 0; 426 return 0;
422} 427}
423 428
424static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family) 429static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
425{ 430{
426 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family); 431 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
427 struct xfrm_state *x; 432 struct xfrm_state *x;
@@ -455,7 +460,7 @@ static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8
455 460
456static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 461static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
457{ 462{
458 unsigned int h = xfrm_src_hash(saddr, family); 463 unsigned int h = xfrm_src_hash(daddr, saddr, family);
459 struct xfrm_state *x; 464 struct xfrm_state *x;
460 struct hlist_node *entry; 465 struct hlist_node *entry;
461 466
@@ -584,7 +589,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
584 if (km_query(x, tmpl, pol) == 0) { 589 if (km_query(x, tmpl, pol) == 0) {
585 x->km.state = XFRM_STATE_ACQ; 590 x->km.state = XFRM_STATE_ACQ;
586 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 591 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
587 h = xfrm_src_hash(saddr, family); 592 h = xfrm_src_hash(daddr, saddr, family);
588 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 593 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
589 if (x->id.spi) { 594 if (x->id.spi) {
590 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family); 595 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
@@ -609,6 +614,14 @@ out:
609 return x; 614 return x;
610} 615}
611 616
617static void xfrm_hash_grow_check(int have_hash_collision)
618{
619 if (have_hash_collision &&
620 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
621 xfrm_state_num > xfrm_state_hmask)
622 schedule_work(&xfrm_hash_work);
623}
624
612static void __xfrm_state_insert(struct xfrm_state *x) 625static void __xfrm_state_insert(struct xfrm_state *x)
613{ 626{
614 unsigned int h; 627 unsigned int h;
@@ -619,10 +632,10 @@ static void __xfrm_state_insert(struct xfrm_state *x)
619 x->props.reqid, x->props.family); 632 x->props.reqid, x->props.family);
620 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 633 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
621 634
622 h = xfrm_src_hash(&x->props.saddr, x->props.family); 635 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
623 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 636 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
624 637
625 if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) { 638 if (x->id.spi) {
626 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, 639 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
627 x->props.family); 640 x->props.family);
628 641
@@ -637,10 +650,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
637 650
638 xfrm_state_num++; 651 xfrm_state_num++;
639 652
640 if (x->bydst.next != NULL && 653 xfrm_hash_grow_check(x->bydst.next != NULL);
641 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
642 xfrm_state_num > xfrm_state_hmask)
643 schedule_work(&xfrm_hash_work);
644} 654}
645 655
646/* xfrm_state_lock is held */ 656/* xfrm_state_lock is held */
@@ -745,9 +755,13 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
745 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ; 755 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
746 add_timer(&x->timer); 756 add_timer(&x->timer);
747 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 757 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
748 h = xfrm_src_hash(saddr, family); 758 h = xfrm_src_hash(daddr, saddr, family);
749 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 759 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
750 wake_up(&km_waitq); 760 wake_up(&km_waitq);
761
762 xfrm_state_num++;
763
764 xfrm_hash_grow_check(x->bydst.next != NULL);
751 } 765 }
752 766
753 return x; 767 return x;
@@ -916,7 +930,7 @@ err:
916EXPORT_SYMBOL(xfrm_state_check); 930EXPORT_SYMBOL(xfrm_state_check);
917 931
918struct xfrm_state * 932struct xfrm_state *
919xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, 933xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
920 unsigned short family) 934 unsigned short family)
921{ 935{
922 struct xfrm_state *x; 936 struct xfrm_state *x;
@@ -1040,7 +1054,7 @@ u32 xfrm_get_acqseq(void)
1040EXPORT_SYMBOL(xfrm_get_acqseq); 1054EXPORT_SYMBOL(xfrm_get_acqseq);
1041 1055
1042void 1056void
1043xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi) 1057xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
1044{ 1058{
1045 unsigned int h; 1059 unsigned int h;
1046 struct xfrm_state *x0; 1060 struct xfrm_state *x0;
@@ -1057,10 +1071,10 @@ xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
1057 x->id.spi = minspi; 1071 x->id.spi = minspi;
1058 } else { 1072 } else {
1059 u32 spi = 0; 1073 u32 spi = 0;
1060 minspi = ntohl(minspi); 1074 u32 low = ntohl(minspi);
1061 maxspi = ntohl(maxspi); 1075 u32 high = ntohl(maxspi);
1062 for (h=0; h<maxspi-minspi+1; h++) { 1076 for (h=0; h<high-low+1; h++) {
1063 spi = minspi + net_random()%(maxspi-minspi+1); 1077 spi = low + net_random()%(high-low+1);
1064 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family); 1078 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1065 if (x0 == NULL) { 1079 if (x0 == NULL) {
1066 x->id.spi = htonl(spi); 1080 x->id.spi = htonl(spi);
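
The rewritten xfrm_alloc_spi() loop keeps minspi/maxspi in wire order and does the range arithmetic on host-order copies (low, high), so the two byte orders are never mixed in the math; it then probes random values in [low, high] until it finds an SPI not already in use. A user-space sketch of the same probing, with rand() in place of net_random() and a trivial in-use test in place of xfrm_state_lookup():

#include <arpa/inet.h>   /* htonl/ntohl */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for xfrm_state_lookup(): is this wire-order SPI taken? */
static int spi_in_use(uint32_t be_spi)
{
    return be_spi == htonl(0x100);      /* pretend 0x100 is already used */
}

/* Returns a free SPI in network byte order, or 0 if none was found. */
static uint32_t alloc_spi(uint32_t be_minspi, uint32_t be_maxspi)
{
    uint32_t low  = ntohl(be_minspi);   /* range math in host order */
    uint32_t high = ntohl(be_maxspi);

    for (uint32_t h = 0; h < high - low + 1; h++) {
        uint32_t spi = low + (uint32_t)rand() % (high - low + 1);
        uint32_t be_spi = htonl(spi);   /* stored and compared in wire order */

        if (!spi_in_use(be_spi))
            return be_spi;
    }
    return 0;
}

int main(void)
{
    srand((unsigned)time(NULL));

    uint32_t spi = alloc_spi(htonl(0x100), htonl(0x1ff));

    printf("allocated SPI 0x%x\n", ntohl(spi));
    return 0;
}
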
@@ -1180,11 +1194,10 @@ static void xfrm_replay_timer_handler(unsigned long data)
1180 spin_unlock(&x->lock); 1194 spin_unlock(&x->lock);
1181} 1195}
1182 1196
1183int xfrm_replay_check(struct xfrm_state *x, u32 seq) 1197int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1184{ 1198{
1185 u32 diff; 1199 u32 diff;
1186 1200 u32 seq = ntohl(net_seq);
1187 seq = ntohl(seq);
1188 1201
1189 if (unlikely(seq == 0)) 1202 if (unlikely(seq == 0))
1190 return -EINVAL; 1203 return -EINVAL;
@@ -1206,11 +1219,10 @@ int xfrm_replay_check(struct xfrm_state *x, u32 seq)
1206} 1219}
1207EXPORT_SYMBOL(xfrm_replay_check); 1220EXPORT_SYMBOL(xfrm_replay_check);
1208 1221
1209void xfrm_replay_advance(struct xfrm_state *x, u32 seq) 1222void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1210{ 1223{
1211 u32 diff; 1224 u32 diff;
1212 1225 u32 seq = ntohl(net_seq);
1213 seq = ntohl(seq);
1214 1226
1215 if (seq > x->replay.seq) { 1227 if (seq > x->replay.seq) {
1216 diff = seq - x->replay.seq; 1228 diff = seq - x->replay.seq;
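
xfrm_replay_check() and xfrm_replay_advance() now take the sequence number as __be32 and convert to host order once at the top; the sliding-window logic itself is untouched. For reference, the standard IPsec anti-replay scheme they implement: sequence numbers ahead of the highest seen are accepted and slide the window, numbers further behind than the window are rejected, and numbers inside the window are checked against a bitmap. A condensed sketch assuming a 32-bit bitmap, as struct xfrm_replay_state uses:

#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct replay_state {
    uint32_t seq;      /* highest sequence number seen so far */
    uint32_t bitmap;   /* one bit per packet in the window behind seq */
    uint32_t window;   /* window size, at most 32 in this sketch */
};

/* 0 if net_seq (wire order) is acceptable, -EINVAL otherwise. */
static int replay_check(const struct replay_state *r, uint32_t net_seq)
{
    uint32_t seq = ntohl(net_seq);

    if (seq == 0)
        return -EINVAL;                    /* sequence 0 is never valid */
    if (seq > r->seq)
        return 0;                          /* ahead of the window */

    uint32_t diff = r->seq - seq;

    if (diff >= r->window)
        return -EINVAL;                    /* too far behind: too old */
    if (r->bitmap & (1u << diff))
        return -EINVAL;                    /* already seen: a replay */
    return 0;
}

/* Record net_seq once the packet has passed the check and authenticated. */
static void replay_advance(struct replay_state *r, uint32_t net_seq)
{
    uint32_t seq = ntohl(net_seq);

    if (seq > r->seq) {
        uint32_t diff = seq - r->seq;

        r->bitmap = diff < r->window ? (r->bitmap << diff) | 1u : 1u;
        r->seq = seq;
    } else {
        r->bitmap |= 1u << (r->seq - seq);  /* mark inside the window */
    }
}

int main(void)
{
    struct replay_state r = { .seq = 0, .bitmap = 0, .window = 32 };

    for (uint32_t s = 1; s <= 5; s++)
        if (replay_check(&r, htonl(s)) == 0)
            replay_advance(&r, htonl(s));

    printf("replay of seq 3: %s\n",
           replay_check(&r, htonl(3)) ? "rejected" : "accepted");
    return 0;
}
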
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c59a78d2923a..2b2e59d8ffbc 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -211,6 +211,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
211 case XFRM_MODE_TRANSPORT: 211 case XFRM_MODE_TRANSPORT:
212 case XFRM_MODE_TUNNEL: 212 case XFRM_MODE_TUNNEL:
213 case XFRM_MODE_ROUTEOPTIMIZATION: 213 case XFRM_MODE_ROUTEOPTIMIZATION:
214 case XFRM_MODE_BEET:
214 break; 215 break;
215 216
216 default: 217 default:
@@ -1991,15 +1992,6 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
1991 xp->type = XFRM_POLICY_TYPE_MAIN; 1992 xp->type = XFRM_POLICY_TYPE_MAIN;
1992 copy_templates(xp, ut, nr); 1993 copy_templates(xp, ut, nr);
1993 1994
1994 if (!xp->security) {
1995 int err = security_xfrm_sock_policy_alloc(xp, sk);
1996 if (err) {
1997 kfree(xp);
1998 *dir = err;
1999 return NULL;
2000 }
2001 }
2002
2003 *dir = p->dir; 1995 *dir = p->dir;
2004 1996
2005 return xp; 1997 return xp;