aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c103
-rw-r--r--net/8021q/vlan.h17
-rw-r--r--net/8021q/vlan_core.c37
-rw-r--r--net/8021q/vlan_dev.c57
-rw-r--r--net/8021q/vlan_netlink.c7
-rw-r--r--net/8021q/vlanproc.c13
-rw-r--r--net/9p/client.c5
-rw-r--r--net/9p/trans_fd.c4
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/appletalk/ddp.c132
-rw-r--r--net/atm/ioctl.c177
-rw-r--r--net/atm/pvc.c3
-rw-r--r--net/atm/svc.c9
-rw-r--r--net/ax25/af_ax25.c12
-rw-r--r--net/bluetooth/af_bluetooth.c5
-rw-r--r--net/bluetooth/bnep/sock.c3
-rw-r--r--net/bluetooth/cmtp/sock.c3
-rw-r--r--net/bluetooth/hci_conn.c1
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/bluetooth/hidp/sock.c3
-rw-r--r--net/bluetooth/l2cap.c38
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bridge/br_fdb.c4
-rw-r--r--net/bridge/br_if.c10
-rw-r--r--net/bridge/br_ioctl.c4
-rw-r--r--net/bridge/br_sysfs_br.c6
-rw-r--r--net/bridge/netfilter/ebt_stp.c4
-rw-r--r--net/can/af_can.c18
-rw-r--r--net/can/bcm.c22
-rw-r--r--net/can/raw.c1
-rw-r--r--net/compat.c8
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/datagram.c10
-rw-r--r--net/core/dev.c428
-rw-r--r--net/core/drop_monitor.c2
-rw-r--r--net/core/fib_rules.c107
-rw-r--r--net/core/link_watch.c94
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net-sysfs.c9
-rw-r--r--net/core/net_namespace.c272
-rw-r--r--net/core/pktgen.c51
-rw-r--r--net/core/rtnetlink.c90
-rw-r--r--net/core/skb_dma_map.c65
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c17
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dcb/dcbnl.c6
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/ipv6.c6
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/decnet/af_decnet.c11
-rw-r--r--net/decnet/dn_dev.c53
-rw-r--r--net/decnet/dn_fib.c10
-rw-r--r--net/decnet/dn_route.c10
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/decnet/dn_table.c7
-rw-r--r--net/decnet/sysctl_net_decnet.c7
-rw-r--r--net/econet/af_econet.c5
-rw-r--r--net/ethernet/eth.c7
-rw-r--r--net/ieee802154/Makefile4
-rw-r--r--net/ieee802154/af_ieee802154.c4
-rw-r--r--net/ieee802154/ieee802154.h53
-rw-r--r--net/ieee802154/netlink.c613
-rw-r--r--net/ieee802154/nl-mac.c617
-rw-r--r--net/ieee802154/nl-phy.c344
-rw-r--r--net/ieee802154/nl_policy.c2
-rw-r--r--net/ieee802154/wpan-class.c75
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/devinet.c158
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c29
-rw-r--r--net/ipv4/fib_rules.c14
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/icmp.c9
-rw-r--r--net/ipv4/igmp.c50
-rw-r--r--net/ipv4/inet_connection_sock.c6
-rw-r--r--net/ipv4/inet_hashtables.c3
-rw-r--r--net/ipv4/inet_lro.c36
-rw-r--r--net/ipv4/inet_timewait_sock.c45
-rw-r--r--net/ipv4/inetpeer.c5
-rw-r--r--net/ipv4/ip_fragment.c13
-rw-r--r--net/ipv4/ip_gre.c56
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipip.c58
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter.c8
-rw-r--r--net/ipv4/netfilter/ip_queue.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c34
-rw-r--r--net/ipv4/raw.c24
-rw-r--r--net/ipv4/route.c107
-rw-r--r--net/ipv4/syncookies.c5
-rw-r--r--net/ipv4/sysctl_net_ipv4.c12
-rw-r--r--net/ipv4/tcp.c293
-rw-r--r--net/ipv4/tcp_htcp.c10
-rw-r--r--net/ipv4/tcp_input.c85
-rw-r--r--net/ipv4/tcp_ipv4.c116
-rw-r--r--net/ipv4/tcp_lp.c4
-rw-r--r--net/ipv4/tcp_minisocks.c77
-rw-r--r--net/ipv4/tcp_output.c307
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv4/tcp_veno.c5
-rw-r--r--net/ipv4/tcp_yeah.c4
-rw-r--r--net/ipv4/udp.c345
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv6/addrconf.c236
-rw-r--r--net/ipv6/af_inet6.c22
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/anycast.c35
-rw-r--r--net/ipv6/datagram.c14
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fib6_rules.c24
-rw-r--r--net/ipv6/ip6_flowlabel.c17
-rw-r--r--net/ipv6/ip6_tunnel.c38
-rw-r--r--net/ipv6/mcast.c51
-rw-r--r--net/ipv6/netfilter/ip6_queue.c2
-rw-r--r--net/ipv6/raw.c24
-rw-r--r--net/ipv6/reassembly.c17
-rw-r--r--net/ipv6/sit.c69
-rw-r--r--net/ipv6/syncookies.c5
-rw-r--r--net/ipv6/tcp_ipv6.c104
-rw-r--r--net/ipv6/udp.c223
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipx/af_ipx.c59
-rw-r--r--net/irda/af_irda.c338
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c1
-rw-r--r--net/irda/irlan/irlan_common.c1
-rw-r--r--net/irda/irlan/irlan_eth.c1
-rw-r--r--net/irda/irnet/irnet_irda.c5
-rw-r--r--net/irda/irnet/irnet_ppp.c1
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/iucv/iucv.c16
-rw-r--r--net/key/af_key.c30
-rw-r--r--net/llc/af_llc.c7
-rw-r--r--net/mac80211/Kconfig13
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/agg-rx.c18
-rw-r--r--net/mac80211/agg-tx.c141
-rw-r--r--net/mac80211/cfg.c61
-rw-r--r--net/mac80211/debugfs.c75
-rw-r--r--net/mac80211/debugfs.h2
-rw-r--r--net/mac80211/debugfs_key.c44
-rw-r--r--net/mac80211/debugfs_netdev.c176
-rw-r--r--net/mac80211/debugfs_sta.c67
-rw-r--r--net/mac80211/driver-ops.h5
-rw-r--r--net/mac80211/driver-trace.h9
-rw-r--r--net/mac80211/ht.c12
-rw-r--r--net/mac80211/ibss.c19
-rw-r--r--net/mac80211/ieee80211_i.h186
-rw-r--r--net/mac80211/iface.c16
-rw-r--r--net/mac80211/key.h12
-rw-r--r--net/mac80211/main.c350
-rw-r--r--net/mac80211/mesh.c149
-rw-r--r--net/mac80211/mesh.h30
-rw-r--r--net/mac80211/mesh_hwmp.c415
-rw-r--r--net/mac80211/mesh_pathtbl.c21
-rw-r--r--net/mac80211/mesh_plink.c58
-rw-r--r--net/mac80211/mlme.c34
-rw-r--r--net/mac80211/rate.c19
-rw-r--r--net/mac80211/rate.h9
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c1
-rw-r--r--net/mac80211/rx.c539
-rw-r--r--net/mac80211/scan.c119
-rw-r--r--net/mac80211/spectmgmt.c2
-rw-r--r--net/mac80211/sta_info.c175
-rw-r--r--net/mac80211/sta_info.h51
-rw-r--r--net/mac80211/status.c337
-rw-r--r--net/mac80211/tkip.c4
-rw-r--r--net/mac80211/tx.c192
-rw-r--r--net/mac80211/util.c41
-rw-r--r--net/mac80211/wep.c8
-rw-r--r--net/mac80211/wpa.c25
-rw-r--r--net/netfilter/nf_conntrack_core.c9
-rw-r--r--net/netfilter/nf_conntrack_expect.c6
-rw-r--r--net/netfilter/nf_conntrack_ftp.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c33
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c22
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c64
-rw-r--r--net/netfilter/nf_log.c18
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/xt_connlimit.c10
-rw-r--r--net/netfilter/xt_limit.c2
-rw-r--r--net/netfilter/xt_osf.c2
-rw-r--r--net/netlabel/netlabel_unlabeled.c8
-rw-r--r--net/netlink/af_netlink.c9
-rw-r--r--net/netrom/af_netrom.c5
-rw-r--r--net/netrom/nr_route.c15
-rw-r--r--net/packet/af_packet.c45
-rw-r--r--net/phonet/af_phonet.c26
-rw-r--r--net/phonet/pep.c33
-rw-r--r--net/phonet/pn_dev.c140
-rw-r--r--net/phonet/pn_netlink.c12
-rw-r--r--net/phonet/socket.c83
-rw-r--r--net/rds/af_rds.c14
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/connection.c6
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_cm.c6
-rw-r--r--net/rds/ib_rdma.c9
-rw-r--r--net/rds/ib_recv.c47
-rw-r--r--net/rds/ib_send.c4
-rw-r--r--net/rds/iw.h2
-rw-r--r--net/rds/iw_cm.c2
-rw-r--r--net/rds/iw_rdma.c9
-rw-r--r--net/rds/iw_recv.c47
-rw-r--r--net/rds/iw_send.c7
-rw-r--r--net/rds/message.c3
-rw-r--r--net/rds/rdma.c36
-rw-r--r--net/rds/rdma.h1
-rw-r--r--net/rds/recv.c11
-rw-r--r--net/rds/send.c27
-rw-r--r--net/rds/threads.c4
-rw-r--r--net/rfkill/core.c5
-rw-r--r--net/rose/af_rose.c5
-rw-r--r--net/rose/rose_route.c44
-rw-r--r--net/rxrpc/af_rxrpc.c5
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_mirred.c107
-rw-r--r--net/sched/cls_api.c8
-rw-r--r--net/sched/cls_flow.c2
-rw-r--r--net/sched/cls_rsvp.h28
-rw-r--r--net/sched/em_meta.c13
-rw-r--r--net/sched/sch_api.c17
-rw-r--r--net/sched/sch_generic.c18
-rw-r--r--net/sched/sch_htb.c4
-rw-r--r--net/sched/sch_netem.c12
-rw-r--r--net/sched/sch_teql.c11
-rw-r--r--net/sctp/associola.c31
-rw-r--r--net/sctp/chunk.c15
-rw-r--r--net/sctp/ipv6.c21
-rw-r--r--net/sctp/output.c50
-rw-r--r--net/sctp/outqueue.c36
-rw-r--r--net/sctp/protocol.c11
-rw-r--r--net/sctp/sm_make_chunk.c13
-rw-r--r--net/sctp/sm_sideeffect.c6
-rw-r--r--net/sctp/sm_statefuns.c41
-rw-r--r--net/sctp/socket.c379
-rw-r--r--net/sctp/sysctl.c13
-rw-r--r--net/sctp/transport.c53
-rw-r--r--net/socket.c608
-rw-r--r--net/sunrpc/addr.c18
-rw-r--r--net/sunrpc/auth.c6
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/cache.c5
-rw-r--r--net/sunrpc/svc.c5
-rw-r--r--net/sunrpc/svc_xprt.c8
-rw-r--r--net/sunrpc/svcauth.c4
-rw-r--r--net/sunrpc/svcauth_unix.c4
-rw-r--r--net/sunrpc/svcsock.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c7
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c4
-rw-r--r--net/tipc/cluster.c16
-rw-r--r--net/tipc/link.c12
-rw-r--r--net/tipc/socket.c20
-rw-r--r--net/tipc/subscr.c6
-rw-r--r--net/unix/af_unix.c13
-rw-r--r--net/wimax/op-msg.c2
-rw-r--r--net/wimax/op-rfkill.c18
-rw-r--r--net/wimax/stack.c11
-rw-r--r--net/wireless/Kconfig6
-rw-r--r--net/wireless/core.c21
-rw-r--r--net/wireless/core.h14
-rw-r--r--net/wireless/debugfs.c15
-rw-r--r--net/wireless/debugfs.h3
-rw-r--r--net/wireless/ibss.c4
-rw-r--r--net/wireless/mlme.c63
-rw-r--r--net/wireless/nl80211.c280
-rw-r--r--net/wireless/reg.c13
-rw-r--r--net/wireless/scan.c50
-rw-r--r--net/wireless/sme.c24
-rw-r--r--net/wireless/util.c40
-rw-r--r--net/wireless/wext-compat.c55
-rw-r--r--net/wireless/wext-core.c5
-rw-r--r--net/x25/af_x25.c111
-rw-r--r--net/x25/x25_route.c4
-rw-r--r--net/x25/x25_subr.c6
-rw-r--r--net/xfrm/xfrm_algo.c35
-rw-r--r--net/xfrm/xfrm_state.c32
-rw-r--r--net/xfrm/xfrm_user.c147
285 files changed, 8719 insertions, 5234 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 511afe72af31..ec3769295dac 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -41,7 +41,7 @@
41 41
42/* Global VLAN variables */ 42/* Global VLAN variables */
43 43
44int vlan_net_id; 44int vlan_net_id __read_mostly;
45 45
46/* Our listing of VLAN group(s) */ 46/* Our listing of VLAN group(s) */
47static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; 47static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
@@ -161,10 +161,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
161 161
162 grp->nr_vlans--; 162 grp->nr_vlans--;
163 163
164 if (!grp->killall) { 164 vlan_group_set_device(grp, vlan_id, NULL);
165 vlan_group_set_device(grp, vlan_id, NULL); 165 if (!grp->killall)
166 synchronize_net(); 166 synchronize_net();
167 } 167
168 unregister_netdevice_queue(dev, head); 168 unregister_netdevice_queue(dev, head);
169 169
170 /* If the group is now empty, kill off the group. */ 170 /* If the group is now empty, kill off the group. */
@@ -184,34 +184,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
184 dev_put(real_dev); 184 dev_put(real_dev);
185} 185}
186 186
187void unregister_vlan_dev_alls(struct vlan_group *grp)
188{
189 LIST_HEAD(list);
190 int i;
191 struct net_device *vlandev;
192 struct vlan_group save;
193
194 memcpy(&save, grp, sizeof(save));
195 memset(&grp->vlan_devices_arrays, 0, sizeof(grp->vlan_devices_arrays));
196 grp->killall = 1;
197
198 synchronize_net();
199
200 /* Delete all VLANs for this dev. */
201 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
202 vlandev = vlan_group_get_device(&save, i);
203 if (!vlandev)
204 continue;
205
206 unregister_vlan_dev(vlandev, &list);
207 if (grp->nr_vlans == 0)
208 break;
209 }
210 unregister_netdevice_many(&list);
211 for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
212 kfree(save.vlan_devices_arrays[i]);
213}
214
215static void vlan_transfer_operstate(const struct net_device *dev, 187static void vlan_transfer_operstate(const struct net_device *dev,
216 struct net_device *vlandev) 188 struct net_device *vlandev)
217{ 189{
@@ -310,8 +282,11 @@ out_uninit_applicant:
310 if (ngrp) 282 if (ngrp)
311 vlan_gvrp_uninit_applicant(real_dev); 283 vlan_gvrp_uninit_applicant(real_dev);
312out_free_group: 284out_free_group:
313 if (ngrp) 285 if (ngrp) {
314 vlan_group_free(ngrp); 286 hlist_del_rcu(&ngrp->hlist);
287 /* Free the group, after all cpu's are done. */
288 call_rcu(&ngrp->rcu, vlan_rcu_free);
289 }
315 return err; 290 return err;
316} 291}
317 292
@@ -456,6 +431,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
456 struct vlan_group *grp; 431 struct vlan_group *grp;
457 int i, flgs; 432 int i, flgs;
458 struct net_device *vlandev; 433 struct net_device *vlandev;
434 struct vlan_dev_info *vlan;
435 LIST_HEAD(list);
459 436
460 if (is_vlan_dev(dev)) 437 if (is_vlan_dev(dev))
461 __vlan_device_event(dev, event); 438 __vlan_device_event(dev, event);
@@ -531,7 +508,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
531 if (!(flgs & IFF_UP)) 508 if (!(flgs & IFF_UP))
532 continue; 509 continue;
533 510
534 dev_change_flags(vlandev, flgs & ~IFF_UP); 511 vlan = vlan_dev_info(vlandev);
512 if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
513 dev_change_flags(vlandev, flgs & ~IFF_UP);
535 vlan_transfer_operstate(dev, vlandev); 514 vlan_transfer_operstate(dev, vlandev);
536 } 515 }
537 break; 516 break;
@@ -547,13 +526,30 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
547 if (flgs & IFF_UP) 526 if (flgs & IFF_UP)
548 continue; 527 continue;
549 528
550 dev_change_flags(vlandev, flgs | IFF_UP); 529 vlan = vlan_dev_info(vlandev);
530 if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
531 dev_change_flags(vlandev, flgs | IFF_UP);
551 vlan_transfer_operstate(dev, vlandev); 532 vlan_transfer_operstate(dev, vlandev);
552 } 533 }
553 break; 534 break;
554 535
555 case NETDEV_UNREGISTER: 536 case NETDEV_UNREGISTER:
556 unregister_vlan_dev_alls(grp); 537 /* Delete all VLANs for this dev. */
538 grp->killall = 1;
539
540 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
541 vlandev = vlan_group_get_device(grp, i);
542 if (!vlandev)
543 continue;
544
545 /* unregistration of last vlan destroys group, abort
546 * afterwards */
547 if (grp->nr_vlans == 1)
548 i = VLAN_GROUP_ARRAY_LEN;
549
550 unregister_vlan_dev(vlandev, &list);
551 }
552 unregister_netdevice_many(&list);
557 break; 553 break;
558 } 554 }
559 555
@@ -690,47 +686,26 @@ out:
690 686
691static int vlan_init_net(struct net *net) 687static int vlan_init_net(struct net *net)
692{ 688{
689 struct vlan_net *vn = net_generic(net, vlan_net_id);
693 int err; 690 int err;
694 struct vlan_net *vn;
695
696 err = -ENOMEM;
697 vn = kzalloc(sizeof(struct vlan_net), GFP_KERNEL);
698 if (vn == NULL)
699 goto err_alloc;
700
701 err = net_assign_generic(net, vlan_net_id, vn);
702 if (err < 0)
703 goto err_assign;
704 691
705 vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD; 692 vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
706 693
707 err = vlan_proc_init(net); 694 err = vlan_proc_init(net);
708 if (err < 0)
709 goto err_proc;
710
711 return 0;
712 695
713err_proc:
714 /* nothing */
715err_assign:
716 kfree(vn);
717err_alloc:
718 return err; 696 return err;
719} 697}
720 698
721static void vlan_exit_net(struct net *net) 699static void vlan_exit_net(struct net *net)
722{ 700{
723 struct vlan_net *vn;
724
725 vn = net_generic(net, vlan_net_id);
726 rtnl_kill_links(net, &vlan_link_ops);
727 vlan_proc_cleanup(net); 701 vlan_proc_cleanup(net);
728 kfree(vn);
729} 702}
730 703
731static struct pernet_operations vlan_net_ops = { 704static struct pernet_operations vlan_net_ops = {
732 .init = vlan_init_net, 705 .init = vlan_init_net,
733 .exit = vlan_exit_net, 706 .exit = vlan_exit_net,
707 .id = &vlan_net_id,
708 .size = sizeof(struct vlan_net),
734}; 709};
735 710
736static int __init vlan_proto_init(void) 711static int __init vlan_proto_init(void)
@@ -740,7 +715,7 @@ static int __init vlan_proto_init(void)
740 pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); 715 pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
741 pr_info("All bugs added by %s\n", vlan_buggyright); 716 pr_info("All bugs added by %s\n", vlan_buggyright);
742 717
743 err = register_pernet_gen_device(&vlan_net_id, &vlan_net_ops); 718 err = register_pernet_subsys(&vlan_net_ops);
744 if (err < 0) 719 if (err < 0)
745 goto err0; 720 goto err0;
746 721
@@ -765,7 +740,7 @@ err4:
765err3: 740err3:
766 unregister_netdevice_notifier(&vlan_notifier_block); 741 unregister_netdevice_notifier(&vlan_notifier_block);
767err2: 742err2:
768 unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops); 743 unregister_pernet_subsys(&vlan_net_ops);
769err0: 744err0:
770 return err; 745 return err;
771} 746}
@@ -785,7 +760,7 @@ static void __exit vlan_cleanup_module(void)
785 for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) 760 for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
786 BUG_ON(!hlist_empty(&vlan_group_hash[i])); 761 BUG_ON(!hlist_empty(&vlan_group_hash[i]));
787 762
788 unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops); 763 unregister_pernet_subsys(&vlan_net_ops);
789 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 764 rcu_barrier(); /* Wait for completion of call_rcu()'s */
790 765
791 vlan_gvrp_uninit(); 766 vlan_gvrp_uninit();
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 68f9290e6837..5685296017e9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping {
16 struct vlan_priority_tci_mapping *next; 16 struct vlan_priority_tci_mapping *next;
17}; 17};
18 18
19
20/**
21 * struct vlan_rx_stats - VLAN percpu rx stats
22 * @rx_packets: number of received packets
23 * @rx_bytes: number of received bytes
24 * @multicast: number of received multicast packets
25 * @rx_errors: number of errors
26 */
27struct vlan_rx_stats {
28 unsigned long rx_packets;
29 unsigned long rx_bytes;
30 unsigned long multicast;
31 unsigned long rx_errors;
32};
33
19/** 34/**
20 * struct vlan_dev_info - VLAN private device data 35 * struct vlan_dev_info - VLAN private device data
21 * @nr_ingress_mappings: number of ingress priority mappings 36 * @nr_ingress_mappings: number of ingress priority mappings
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping {
29 * @dent: proc dir entry 44 * @dent: proc dir entry
30 * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX 45 * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
31 * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX 46 * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
47 * @vlan_rx_stats: ptr to percpu rx stats
32 */ 48 */
33struct vlan_dev_info { 49struct vlan_dev_info {
34 unsigned int nr_ingress_mappings; 50 unsigned int nr_ingress_mappings;
@@ -45,6 +61,7 @@ struct vlan_dev_info {
45 struct proc_dir_entry *dent; 61 struct proc_dir_entry *dent;
46 unsigned long cnt_inc_headroom_on_tx; 62 unsigned long cnt_inc_headroom_on_tx;
47 unsigned long cnt_encap_on_xmit; 63 unsigned long cnt_encap_on_xmit;
64 struct vlan_rx_stats *vlan_rx_stats;
48}; 65};
49 66
50static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 7f7de1a04de6..e75a2f3b10af 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,7 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb))
15 goto drop; 15 goto drop;
16 16
17 skb->vlan_tci = vlan_tci; 17 __vlan_hwaccel_put_tag(skb, vlan_tci);
18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
19 19
20 if (!skb->dev) 20 if (!skb->dev)
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx);
31int vlan_hwaccel_do_receive(struct sk_buff *skb) 31int vlan_hwaccel_do_receive(struct sk_buff *skb)
32{ 32{
33 struct net_device *dev = skb->dev; 33 struct net_device *dev = skb->dev;
34 struct net_device_stats *stats; 34 struct vlan_rx_stats *rx_stats;
35 35
36 skb->dev = vlan_dev_info(dev)->real_dev; 36 skb->dev = vlan_dev_info(dev)->real_dev;
37 netif_nit_deliver(skb); 37 netif_nit_deliver(skb);
@@ -40,15 +40,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
40 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); 40 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
41 skb->vlan_tci = 0; 41 skb->vlan_tci = 0;
42 42
43 stats = &dev->stats; 43 rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
44 stats->rx_packets++; 44 smp_processor_id());
45 stats->rx_bytes += skb->len; 45
46 rx_stats->rx_packets++;
47 rx_stats->rx_bytes += skb->len;
46 48
47 switch (skb->pkt_type) { 49 switch (skb->pkt_type) {
48 case PACKET_BROADCAST: 50 case PACKET_BROADCAST:
49 break; 51 break;
50 case PACKET_MULTICAST: 52 case PACKET_MULTICAST:
51 stats->multicast++; 53 rx_stats->multicast++;
52 break; 54 break;
53 case PACKET_OTHERHOST: 55 case PACKET_OTHERHOST:
54 /* Our lower layer thinks this is not local, let's make sure. 56 /* Our lower layer thinks this is not local, let's make sure.
@@ -74,15 +76,16 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
74} 76}
75EXPORT_SYMBOL(vlan_dev_vlan_id); 77EXPORT_SYMBOL(vlan_dev_vlan_id);
76 78
77static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, 79static gro_result_t
78 unsigned int vlan_tci, struct sk_buff *skb) 80vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
81 unsigned int vlan_tci, struct sk_buff *skb)
79{ 82{
80 struct sk_buff *p; 83 struct sk_buff *p;
81 84
82 if (skb_bond_should_drop(skb)) 85 if (skb_bond_should_drop(skb))
83 goto drop; 86 goto drop;
84 87
85 skb->vlan_tci = vlan_tci; 88 __vlan_hwaccel_put_tag(skb, vlan_tci);
86 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 89 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
87 90
88 if (!skb->dev) 91 if (!skb->dev)
@@ -101,11 +104,12 @@ drop:
101 return GRO_DROP; 104 return GRO_DROP;
102} 105}
103 106
104int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, 107gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
105 unsigned int vlan_tci, struct sk_buff *skb) 108 unsigned int vlan_tci, struct sk_buff *skb)
106{ 109{
107 if (netpoll_rx_on(skb)) 110 if (netpoll_rx_on(skb))
108 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); 111 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
112 ? GRO_DROP : GRO_NORMAL;
109 113
110 skb_gro_reset_offset(skb); 114 skb_gro_reset_offset(skb);
111 115
@@ -113,17 +117,18 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
113} 117}
114EXPORT_SYMBOL(vlan_gro_receive); 118EXPORT_SYMBOL(vlan_gro_receive);
115 119
116int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, 120gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
117 unsigned int vlan_tci) 121 unsigned int vlan_tci)
118{ 122{
119 struct sk_buff *skb = napi_frags_skb(napi); 123 struct sk_buff *skb = napi_frags_skb(napi);
120 124
121 if (!skb) 125 if (!skb)
122 return NET_RX_DROP; 126 return GRO_DROP;
123 127
124 if (netpoll_rx_on(skb)) { 128 if (netpoll_rx_on(skb)) {
125 skb->protocol = eth_type_trans(skb, skb->dev); 129 skb->protocol = eth_type_trans(skb, skb->dev);
126 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); 130 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
131 ? GRO_DROP : GRO_NORMAL;
127 } 132 }
128 133
129 return napi_frags_finish(napi, skb, 134 return napi_frags_finish(napi, skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 790fd55ec318..b7889782047e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
140 struct packet_type *ptype, struct net_device *orig_dev) 140 struct packet_type *ptype, struct net_device *orig_dev)
141{ 141{
142 struct vlan_hdr *vhdr; 142 struct vlan_hdr *vhdr;
143 struct net_device_stats *stats; 143 struct vlan_rx_stats *rx_stats;
144 u16 vlan_id; 144 u16 vlan_id;
145 u16 vlan_tci; 145 u16 vlan_tci;
146 146
@@ -163,9 +163,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
163 goto err_unlock; 163 goto err_unlock;
164 } 164 }
165 165
166 stats = &skb->dev->stats; 166 rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
167 stats->rx_packets++; 167 smp_processor_id());
168 stats->rx_bytes += skb->len; 168 rx_stats->rx_packets++;
169 rx_stats->rx_bytes += skb->len;
169 170
170 skb_pull_rcsum(skb, VLAN_HLEN); 171 skb_pull_rcsum(skb, VLAN_HLEN);
171 172
@@ -180,7 +181,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
180 break; 181 break;
181 182
182 case PACKET_MULTICAST: 183 case PACKET_MULTICAST:
183 stats->multicast++; 184 rx_stats->multicast++;
184 break; 185 break;
185 186
186 case PACKET_OTHERHOST: 187 case PACKET_OTHERHOST:
@@ -200,7 +201,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
200 201
201 skb = vlan_check_reorder_header(skb); 202 skb = vlan_check_reorder_header(skb);
202 if (!skb) { 203 if (!skb) {
203 stats->rx_errors++; 204 rx_stats->rx_errors++;
204 goto err_unlock; 205 goto err_unlock;
205 } 206 }
206 207
@@ -332,7 +333,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
332 } else 333 } else
333 txq->tx_dropped++; 334 txq->tx_dropped++;
334 335
335 return NETDEV_TX_OK; 336 return ret;
336} 337}
337 338
338static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, 339static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
@@ -358,7 +359,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
358 } else 359 } else
359 txq->tx_dropped++; 360 txq->tx_dropped++;
360 361
361 return NETDEV_TX_OK; 362 return ret;
362} 363}
363 364
364static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 365static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
@@ -430,7 +431,8 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
430 struct vlan_dev_info *vlan = vlan_dev_info(dev); 431 struct vlan_dev_info *vlan = vlan_dev_info(dev);
431 u32 old_flags = vlan->flags; 432 u32 old_flags = vlan->flags;
432 433
433 if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) 434 if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
435 VLAN_FLAG_LOOSE_BINDING))
434 return -EINVAL; 436 return -EINVAL;
435 437
436 vlan->flags = (old_flags & ~mask) | (flags & mask); 438 vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -455,7 +457,8 @@ static int vlan_dev_open(struct net_device *dev)
455 struct net_device *real_dev = vlan->real_dev; 457 struct net_device *real_dev = vlan->real_dev;
456 int err; 458 int err;
457 459
458 if (!(real_dev->flags & IFF_UP)) 460 if (!(real_dev->flags & IFF_UP) &&
461 !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
459 return -ENETDOWN; 462 return -ENETDOWN;
460 463
461 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { 464 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
@@ -731,6 +734,11 @@ static int vlan_dev_init(struct net_device *dev)
731 subclass = 1; 734 subclass = 1;
732 735
733 vlan_dev_set_lockdep_class(dev, subclass); 736 vlan_dev_set_lockdep_class(dev, subclass);
737
738 vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
739 if (!vlan_dev_info(dev)->vlan_rx_stats)
740 return -ENOMEM;
741
734 return 0; 742 return 0;
735} 743}
736 744
@@ -740,6 +748,8 @@ static void vlan_dev_uninit(struct net_device *dev)
740 struct vlan_dev_info *vlan = vlan_dev_info(dev); 748 struct vlan_dev_info *vlan = vlan_dev_info(dev);
741 int i; 749 int i;
742 750
751 free_percpu(vlan->vlan_rx_stats);
752 vlan->vlan_rx_stats = NULL;
743 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 753 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
744 while ((pm = vlan->egress_priority_map[i]) != NULL) { 754 while ((pm = vlan->egress_priority_map[i]) != NULL) {
745 vlan->egress_priority_map[i] = pm->next; 755 vlan->egress_priority_map[i] = pm->next;
@@ -775,6 +785,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
775 return dev_ethtool_get_flags(vlan->real_dev); 785 return dev_ethtool_get_flags(vlan->real_dev);
776} 786}
777 787
788static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
789{
790 struct net_device_stats *stats = &dev->stats;
791
792 dev_txq_stats_fold(dev, stats);
793
794 if (vlan_dev_info(dev)->vlan_rx_stats) {
795 struct vlan_rx_stats *p, rx = {0};
796 int i;
797
798 for_each_possible_cpu(i) {
799 p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
800 rx.rx_packets += p->rx_packets;
801 rx.rx_bytes += p->rx_bytes;
802 rx.rx_errors += p->rx_errors;
803 rx.multicast += p->multicast;
804 }
805 stats->rx_packets = rx.rx_packets;
806 stats->rx_bytes = rx.rx_bytes;
807 stats->rx_errors = rx.rx_errors;
808 stats->multicast = rx.multicast;
809 }
810 return stats;
811}
812
778static const struct ethtool_ops vlan_ethtool_ops = { 813static const struct ethtool_ops vlan_ethtool_ops = {
779 .get_settings = vlan_ethtool_get_settings, 814 .get_settings = vlan_ethtool_get_settings,
780 .get_drvinfo = vlan_ethtool_get_drvinfo, 815 .get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -797,6 +832,7 @@ static const struct net_device_ops vlan_netdev_ops = {
797 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 832 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
798 .ndo_do_ioctl = vlan_dev_ioctl, 833 .ndo_do_ioctl = vlan_dev_ioctl,
799 .ndo_neigh_setup = vlan_dev_neigh_setup, 834 .ndo_neigh_setup = vlan_dev_neigh_setup,
835 .ndo_get_stats = vlan_dev_get_stats,
800#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 836#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
801 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, 837 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
802 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, 838 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -820,6 +856,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
820 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 856 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
821 .ndo_do_ioctl = vlan_dev_ioctl, 857 .ndo_do_ioctl = vlan_dev_ioctl,
822 .ndo_neigh_setup = vlan_dev_neigh_setup, 858 .ndo_neigh_setup = vlan_dev_neigh_setup,
859 .ndo_get_stats = vlan_dev_get_stats,
823#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 860#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
824 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, 861 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
825 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, 862 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index a91504850195..ddc105734af7 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -60,7 +60,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
60 if (data[IFLA_VLAN_FLAGS]) { 60 if (data[IFLA_VLAN_FLAGS]) {
61 flags = nla_data(data[IFLA_VLAN_FLAGS]); 61 flags = nla_data(data[IFLA_VLAN_FLAGS]);
62 if ((flags->flags & flags->mask) & 62 if ((flags->flags & flags->mask) &
63 ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) 63 ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
64 VLAN_FLAG_LOOSE_BINDING))
64 return -EINVAL; 65 return -EINVAL;
65 } 66 }
66 67
@@ -119,7 +120,7 @@ static int vlan_get_tx_queues(struct net *net,
119 return 0; 120 return 0;
120} 121}
121 122
122static int vlan_newlink(struct net_device *dev, 123static int vlan_newlink(struct net *src_net, struct net_device *dev,
123 struct nlattr *tb[], struct nlattr *data[]) 124 struct nlattr *tb[], struct nlattr *data[])
124{ 125{
125 struct vlan_dev_info *vlan = vlan_dev_info(dev); 126 struct vlan_dev_info *vlan = vlan_dev_info(dev);
@@ -131,7 +132,7 @@ static int vlan_newlink(struct net_device *dev,
131 132
132 if (!tb[IFLA_LINK]) 133 if (!tb[IFLA_LINK])
133 return -EINVAL; 134 return -EINVAL;
134 real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); 135 real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
135 if (!real_dev) 136 if (!real_dev)
136 return -ENODEV; 137 return -ENODEV;
137 138
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 6262c335f3c2..9ec1f057c03a 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -201,18 +201,17 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
201 201
202/* start read of /proc/net/vlan/config */ 202/* start read of /proc/net/vlan/config */
203static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) 203static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
204 __acquires(dev_base_lock) 204 __acquires(rcu)
205{ 205{
206 struct net_device *dev; 206 struct net_device *dev;
207 struct net *net = seq_file_net(seq); 207 struct net *net = seq_file_net(seq);
208 loff_t i = 1; 208 loff_t i = 1;
209 209
210 read_lock(&dev_base_lock); 210 rcu_read_lock();
211
212 if (*pos == 0) 211 if (*pos == 0)
213 return SEQ_START_TOKEN; 212 return SEQ_START_TOKEN;
214 213
215 for_each_netdev(net, dev) { 214 for_each_netdev_rcu(net, dev) {
216 if (!is_vlan_dev(dev)) 215 if (!is_vlan_dev(dev))
217 continue; 216 continue;
218 217
@@ -234,7 +233,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
234 if (v == SEQ_START_TOKEN) 233 if (v == SEQ_START_TOKEN)
235 dev = net_device_entry(&net->dev_base_head); 234 dev = net_device_entry(&net->dev_base_head);
236 235
237 for_each_netdev_continue(net, dev) { 236 for_each_netdev_continue_rcu(net, dev) {
238 if (!is_vlan_dev(dev)) 237 if (!is_vlan_dev(dev))
239 continue; 238 continue;
240 239
@@ -245,9 +244,9 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
245} 244}
246 245
247static void vlan_seq_stop(struct seq_file *seq, void *v) 246static void vlan_seq_stop(struct seq_file *seq, void *v)
248 __releases(dev_base_lock) 247 __releases(rcu)
249{ 248{
250 read_unlock(&dev_base_lock); 249 rcu_read_unlock();
251} 250}
252 251
253static int vlan_seq_show(struct seq_file *seq, void *v) 252static int vlan_seq_show(struct seq_file *seq, void *v)
diff --git a/net/9p/client.c b/net/9p/client.c
index 5bf5f227dbe0..8af95b2dddd6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -582,11 +582,9 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
582 582
583 memset(&fid->qid, 0, sizeof(struct p9_qid)); 583 memset(&fid->qid, 0, sizeof(struct p9_qid));
584 fid->mode = -1; 584 fid->mode = -1;
585 fid->rdir_fpos = 0;
586 fid->uid = current_fsuid(); 585 fid->uid = current_fsuid();
587 fid->clnt = clnt; 586 fid->clnt = clnt;
588 fid->aux = NULL; 587 fid->rdir = NULL;
589
590 spin_lock_irqsave(&clnt->lock, flags); 588 spin_lock_irqsave(&clnt->lock, flags);
591 list_add(&fid->flist, &clnt->fidlist); 589 list_add(&fid->flist, &clnt->fidlist);
592 spin_unlock_irqrestore(&clnt->lock, flags); 590 spin_unlock_irqrestore(&clnt->lock, flags);
@@ -609,6 +607,7 @@ static void p9_fid_destroy(struct p9_fid *fid)
609 spin_lock_irqsave(&clnt->lock, flags); 607 spin_lock_irqsave(&clnt->lock, flags);
610 list_del(&fid->flist); 608 list_del(&fid->flist);
611 spin_unlock_irqrestore(&clnt->lock, flags); 609 spin_unlock_irqrestore(&clnt->lock, flags);
610 kfree(fid->rdir);
612 kfree(fid); 611 kfree(fid);
613} 612}
614 613
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 8d934dd7fd54..4dd873e3a1bb 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -633,8 +633,8 @@ static void p9_poll_mux(struct p9_conn *m)
633 if (n & POLLOUT) { 633 if (n & POLLOUT) {
634 set_bit(Wpending, &m->wsched); 634 set_bit(Wpending, &m->wsched);
635 P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m); 635 P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
636 if ((m->wsize || !list_empty(&m->unsent_req_list)) 636 if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
637 && !test_and_set_bit(Wworksched, &m->wsched)) { 637 !test_and_set_bit(Wworksched, &m->wsched)) {
638 P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); 638 P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
639 queue_work(p9_mux_wq, &m->wq); 639 queue_work(p9_mux_wq, &m->wq);
640 } 640 }
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b2e07f0dd298..ea1e3daabefe 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,7 +43,6 @@
43#include <net/9p/transport.h> 43#include <net/9p/transport.h>
44#include <linux/scatterlist.h> 44#include <linux/scatterlist.h>
45#include <linux/virtio.h> 45#include <linux/virtio.h>
46#include <linux/virtio_ids.h>
47#include <linux/virtio_9p.h> 46#include <linux/virtio_9p.h>
48 47
49#define VIRTQUEUE_NUM 128 48#define VIRTQUEUE_NUM 128
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index abe38014b7fd..9fc4da56fb1d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -56,6 +56,7 @@
56#include <linux/if_arp.h> 56#include <linux/if_arp.h>
57#include <linux/smp_lock.h> 57#include <linux/smp_lock.h>
58#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 58#include <linux/termios.h> /* For TIOCOUTQ/INQ */
59#include <linux/compat.h>
59#include <net/datalink.h> 60#include <net/datalink.h>
60#include <net/psnap.h> 61#include <net/psnap.h>
61#include <net/sock.h> 62#include <net/sock.h>
@@ -922,13 +923,8 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
922{ 923{
923 /* This ought to be unwrapped neatly. I'll trust gcc for now */ 924 /* This ought to be unwrapped neatly. I'll trust gcc for now */
924 while (len--) { 925 while (len--) {
925 sum += *data; 926 sum += *data++;
926 sum <<= 1; 927 sum = rol16(sum, 1);
927 if (sum & 0x10000) {
928 sum++;
929 sum &= 0xffff;
930 }
931 data++;
932 } 928 }
933 return sum; 929 return sum;
934} 930}
@@ -1021,12 +1017,13 @@ static struct proto ddp_proto = {
1021 * Create a socket. Initialise the socket, blank the addresses 1017 * Create a socket. Initialise the socket, blank the addresses
1022 * set the state. 1018 * set the state.
1023 */ 1019 */
1024static int atalk_create(struct net *net, struct socket *sock, int protocol) 1020static int atalk_create(struct net *net, struct socket *sock, int protocol,
1021 int kern)
1025{ 1022{
1026 struct sock *sk; 1023 struct sock *sk;
1027 int rc = -ESOCKTNOSUPPORT; 1024 int rc = -ESOCKTNOSUPPORT;
1028 1025
1029 if (net != &init_net) 1026 if (!net_eq(net, &init_net))
1030 return -EAFNOSUPPORT; 1027 return -EAFNOSUPPORT;
1031 1028
1032 /* 1029 /*
@@ -1054,11 +1051,13 @@ static int atalk_release(struct socket *sock)
1054{ 1051{
1055 struct sock *sk = sock->sk; 1052 struct sock *sk = sock->sk;
1056 1053
1054 lock_kernel();
1057 if (sk) { 1055 if (sk) {
1058 sock_orphan(sk); 1056 sock_orphan(sk);
1059 sock->sk = NULL; 1057 sock->sk = NULL;
1060 atalk_destroy_socket(sk); 1058 atalk_destroy_socket(sk);
1061 } 1059 }
1060 unlock_kernel();
1062 return 0; 1061 return 0;
1063} 1062}
1064 1063
@@ -1134,6 +1133,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1134 struct sockaddr_at *addr = (struct sockaddr_at *)uaddr; 1133 struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
1135 struct sock *sk = sock->sk; 1134 struct sock *sk = sock->sk;
1136 struct atalk_sock *at = at_sk(sk); 1135 struct atalk_sock *at = at_sk(sk);
1136 int err;
1137 1137
1138 if (!sock_flag(sk, SOCK_ZAPPED) || 1138 if (!sock_flag(sk, SOCK_ZAPPED) ||
1139 addr_len != sizeof(struct sockaddr_at)) 1139 addr_len != sizeof(struct sockaddr_at))
@@ -1142,37 +1142,44 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1142 if (addr->sat_family != AF_APPLETALK) 1142 if (addr->sat_family != AF_APPLETALK)
1143 return -EAFNOSUPPORT; 1143 return -EAFNOSUPPORT;
1144 1144
1145 lock_kernel();
1145 if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) { 1146 if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
1146 struct atalk_addr *ap = atalk_find_primary(); 1147 struct atalk_addr *ap = atalk_find_primary();
1147 1148
1149 err = -EADDRNOTAVAIL;
1148 if (!ap) 1150 if (!ap)
1149 return -EADDRNOTAVAIL; 1151 goto out;
1150 1152
1151 at->src_net = addr->sat_addr.s_net = ap->s_net; 1153 at->src_net = addr->sat_addr.s_net = ap->s_net;
1152 at->src_node = addr->sat_addr.s_node= ap->s_node; 1154 at->src_node = addr->sat_addr.s_node= ap->s_node;
1153 } else { 1155 } else {
1156 err = -EADDRNOTAVAIL;
1154 if (!atalk_find_interface(addr->sat_addr.s_net, 1157 if (!atalk_find_interface(addr->sat_addr.s_net,
1155 addr->sat_addr.s_node)) 1158 addr->sat_addr.s_node))
1156 return -EADDRNOTAVAIL; 1159 goto out;
1157 1160
1158 at->src_net = addr->sat_addr.s_net; 1161 at->src_net = addr->sat_addr.s_net;
1159 at->src_node = addr->sat_addr.s_node; 1162 at->src_node = addr->sat_addr.s_node;
1160 } 1163 }
1161 1164
1162 if (addr->sat_port == ATADDR_ANYPORT) { 1165 if (addr->sat_port == ATADDR_ANYPORT) {
1163 int n = atalk_pick_and_bind_port(sk, addr); 1166 err = atalk_pick_and_bind_port(sk, addr);
1164 1167
1165 if (n < 0) 1168 if (err < 0)
1166 return n; 1169 goto out;
1167 } else { 1170 } else {
1168 at->src_port = addr->sat_port; 1171 at->src_port = addr->sat_port;
1169 1172
1173 err = -EADDRINUSE;
1170 if (atalk_find_or_insert_socket(sk, addr)) 1174 if (atalk_find_or_insert_socket(sk, addr))
1171 return -EADDRINUSE; 1175 goto out;
1172 } 1176 }
1173 1177
1174 sock_reset_flag(sk, SOCK_ZAPPED); 1178 sock_reset_flag(sk, SOCK_ZAPPED);
1175 return 0; 1179 err = 0;
1180out:
1181 unlock_kernel();
1182 return err;
1176} 1183}
1177 1184
1178/* Set the address we talk to */ 1185/* Set the address we talk to */
@@ -1182,6 +1189,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1182 struct sock *sk = sock->sk; 1189 struct sock *sk = sock->sk;
1183 struct atalk_sock *at = at_sk(sk); 1190 struct atalk_sock *at = at_sk(sk);
1184 struct sockaddr_at *addr; 1191 struct sockaddr_at *addr;
1192 int err;
1185 1193
1186 sk->sk_state = TCP_CLOSE; 1194 sk->sk_state = TCP_CLOSE;
1187 sock->state = SS_UNCONNECTED; 1195 sock->state = SS_UNCONNECTED;
@@ -1206,12 +1214,15 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1206#endif 1214#endif
1207 } 1215 }
1208 1216
1217 lock_kernel();
1218 err = -EBUSY;
1209 if (sock_flag(sk, SOCK_ZAPPED)) 1219 if (sock_flag(sk, SOCK_ZAPPED))
1210 if (atalk_autobind(sk) < 0) 1220 if (atalk_autobind(sk) < 0)
1211 return -EBUSY; 1221 goto out;
1212 1222
1223 err = -ENETUNREACH;
1213 if (!atrtr_get_dev(&addr->sat_addr)) 1224 if (!atrtr_get_dev(&addr->sat_addr))
1214 return -ENETUNREACH; 1225 goto out;
1215 1226
1216 at->dest_port = addr->sat_port; 1227 at->dest_port = addr->sat_port;
1217 at->dest_net = addr->sat_addr.s_net; 1228 at->dest_net = addr->sat_addr.s_net;
@@ -1219,7 +1230,10 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1219 1230
1220 sock->state = SS_CONNECTED; 1231 sock->state = SS_CONNECTED;
1221 sk->sk_state = TCP_ESTABLISHED; 1232 sk->sk_state = TCP_ESTABLISHED;
1222 return 0; 1233 err = 0;
1234out:
1235 unlock_kernel();
1236 return err;
1223} 1237}
1224 1238
1225/* 1239/*
@@ -1232,17 +1246,21 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1232 struct sockaddr_at sat; 1246 struct sockaddr_at sat;
1233 struct sock *sk = sock->sk; 1247 struct sock *sk = sock->sk;
1234 struct atalk_sock *at = at_sk(sk); 1248 struct atalk_sock *at = at_sk(sk);
1249 int err;
1235 1250
1251 lock_kernel();
1252 err = -ENOBUFS;
1236 if (sock_flag(sk, SOCK_ZAPPED)) 1253 if (sock_flag(sk, SOCK_ZAPPED))
1237 if (atalk_autobind(sk) < 0) 1254 if (atalk_autobind(sk) < 0)
1238 return -ENOBUFS; 1255 goto out;
1239 1256
1240 *uaddr_len = sizeof(struct sockaddr_at); 1257 *uaddr_len = sizeof(struct sockaddr_at);
1241 memset(&sat.sat_zero, 0, sizeof(sat.sat_zero)); 1258 memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
1242 1259
1243 if (peer) { 1260 if (peer) {
1261 err = -ENOTCONN;
1244 if (sk->sk_state != TCP_ESTABLISHED) 1262 if (sk->sk_state != TCP_ESTABLISHED)
1245 return -ENOTCONN; 1263 goto out;
1246 1264
1247 sat.sat_addr.s_net = at->dest_net; 1265 sat.sat_addr.s_net = at->dest_net;
1248 sat.sat_addr.s_node = at->dest_node; 1266 sat.sat_addr.s_node = at->dest_node;
@@ -1253,9 +1271,23 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1253 sat.sat_port = at->src_port; 1271 sat.sat_port = at->src_port;
1254 } 1272 }
1255 1273
1274 err = 0;
1256 sat.sat_family = AF_APPLETALK; 1275 sat.sat_family = AF_APPLETALK;
1257 memcpy(uaddr, &sat, sizeof(sat)); 1276 memcpy(uaddr, &sat, sizeof(sat));
1258 return 0; 1277
1278out:
1279 unlock_kernel();
1280 return err;
1281}
1282
1283static unsigned int atalk_poll(struct file *file, struct socket *sock,
1284 poll_table *wait)
1285{
1286 int err;
1287 lock_kernel();
1288 err = datagram_poll(file, sock, wait);
1289 unlock_kernel();
1290 return err;
1259} 1291}
1260 1292
1261#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE) 1293#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
@@ -1563,23 +1595,28 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1563 if (len > DDP_MAXSZ) 1595 if (len > DDP_MAXSZ)
1564 return -EMSGSIZE; 1596 return -EMSGSIZE;
1565 1597
1598 lock_kernel();
1566 if (usat) { 1599 if (usat) {
1600 err = -EBUSY;
1567 if (sock_flag(sk, SOCK_ZAPPED)) 1601 if (sock_flag(sk, SOCK_ZAPPED))
1568 if (atalk_autobind(sk) < 0) 1602 if (atalk_autobind(sk) < 0)
1569 return -EBUSY; 1603 goto out;
1570 1604
1605 err = -EINVAL;
1571 if (msg->msg_namelen < sizeof(*usat) || 1606 if (msg->msg_namelen < sizeof(*usat) ||
1572 usat->sat_family != AF_APPLETALK) 1607 usat->sat_family != AF_APPLETALK)
1573 return -EINVAL; 1608 goto out;
1574 1609
1610 err = -EPERM;
1575 /* netatalk didn't implement this check */ 1611 /* netatalk didn't implement this check */
1576 if (usat->sat_addr.s_node == ATADDR_BCAST && 1612 if (usat->sat_addr.s_node == ATADDR_BCAST &&
1577 !sock_flag(sk, SOCK_BROADCAST)) { 1613 !sock_flag(sk, SOCK_BROADCAST)) {
1578 return -EPERM; 1614 goto out;
1579 } 1615 }
1580 } else { 1616 } else {
1617 err = -ENOTCONN;
1581 if (sk->sk_state != TCP_ESTABLISHED) 1618 if (sk->sk_state != TCP_ESTABLISHED)
1582 return -ENOTCONN; 1619 goto out;
1583 usat = &local_satalk; 1620 usat = &local_satalk;
1584 usat->sat_family = AF_APPLETALK; 1621 usat->sat_family = AF_APPLETALK;
1585 usat->sat_port = at->dest_port; 1622 usat->sat_port = at->dest_port;
@@ -1603,8 +1640,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1603 1640
1604 rt = atrtr_find(&at_hint); 1641 rt = atrtr_find(&at_hint);
1605 } 1642 }
1643 err = ENETUNREACH;
1606 if (!rt) 1644 if (!rt)
1607 return -ENETUNREACH; 1645 goto out;
1608 1646
1609 dev = rt->dev; 1647 dev = rt->dev;
1610 1648
@@ -1614,7 +1652,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1614 size += dev->hard_header_len; 1652 size += dev->hard_header_len;
1615 skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); 1653 skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
1616 if (!skb) 1654 if (!skb)
1617 return err; 1655 goto out;
1618 1656
1619 skb->sk = sk; 1657 skb->sk = sk;
1620 skb_reserve(skb, ddp_dl->header_length); 1658 skb_reserve(skb, ddp_dl->header_length);
@@ -1637,7 +1675,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1637 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1675 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1638 if (err) { 1676 if (err) {
1639 kfree_skb(skb); 1677 kfree_skb(skb);
1640 return -EFAULT; 1678 err = -EFAULT;
1679 goto out;
1641 } 1680 }
1642 1681
1643 if (sk->sk_no_check == 1) 1682 if (sk->sk_no_check == 1)
@@ -1676,7 +1715,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1676 rt = atrtr_find(&at_lo); 1715 rt = atrtr_find(&at_lo);
1677 if (!rt) { 1716 if (!rt) {
1678 kfree_skb(skb); 1717 kfree_skb(skb);
1679 return -ENETUNREACH; 1718 err = -ENETUNREACH;
1719 goto out;
1680 } 1720 }
1681 dev = rt->dev; 1721 dev = rt->dev;
1682 skb->dev = dev; 1722 skb->dev = dev;
@@ -1696,7 +1736,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1696 } 1736 }
1697 SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len); 1737 SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);
1698 1738
1699 return len; 1739out:
1740 unlock_kernel();
1741 return err ? : len;
1700} 1742}
1701 1743
1702static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 1744static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
@@ -1708,10 +1750,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1708 int copied = 0; 1750 int copied = 0;
1709 int offset = 0; 1751 int offset = 0;
1710 int err = 0; 1752 int err = 0;
1711 struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1753 struct sk_buff *skb;
1754
1755 lock_kernel();
1756 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1712 flags & MSG_DONTWAIT, &err); 1757 flags & MSG_DONTWAIT, &err);
1713 if (!skb) 1758 if (!skb)
1714 return err; 1759 goto out;
1715 1760
1716 /* FIXME: use skb->cb to be able to use shared skbs */ 1761 /* FIXME: use skb->cb to be able to use shared skbs */
1717 ddp = ddp_hdr(skb); 1762 ddp = ddp_hdr(skb);
@@ -1739,6 +1784,9 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1739 } 1784 }
1740 1785
1741 skb_free_datagram(sk, skb); /* Free the datagram. */ 1786 skb_free_datagram(sk, skb); /* Free the datagram. */
1787
1788out:
1789 unlock_kernel();
1742 return err ? : copied; 1790 return err ? : copied;
1743} 1791}
1744 1792
@@ -1810,12 +1858,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1810static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1858static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1811{ 1859{
1812 /* 1860 /*
1813 * All Appletalk ioctls except SIOCATALKDIFADDR are standard. And 1861 * SIOCATALKDIFADDR is a SIOCPROTOPRIVATE ioctl number, so we
1814 * SIOCATALKDIFADDR is handled by upper layer as well, so there is 1862 * cannot handle it in common code. The data we access if ifreq
1815 * nothing to do. Eventually SIOCATALKDIFADDR should be moved 1863 * here is compatible, so we can simply call the native
1816 * here so there is no generic SIOCPROTOPRIVATE translation in the 1864 * handler.
1817 * system.
1818 */ 1865 */
1866 if (cmd == SIOCATALKDIFADDR)
1867 return atalk_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1868
1819 return -ENOIOCTLCMD; 1869 return -ENOIOCTLCMD;
1820} 1870}
1821#endif 1871#endif
@@ -1827,7 +1877,7 @@ static const struct net_proto_family atalk_family_ops = {
1827 .owner = THIS_MODULE, 1877 .owner = THIS_MODULE,
1828}; 1878};
1829 1879
1830static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = { 1880static const struct proto_ops atalk_dgram_ops = {
1831 .family = PF_APPLETALK, 1881 .family = PF_APPLETALK,
1832 .owner = THIS_MODULE, 1882 .owner = THIS_MODULE,
1833 .release = atalk_release, 1883 .release = atalk_release,
@@ -1836,7 +1886,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = {
1836 .socketpair = sock_no_socketpair, 1886 .socketpair = sock_no_socketpair,
1837 .accept = sock_no_accept, 1887 .accept = sock_no_accept,
1838 .getname = atalk_getname, 1888 .getname = atalk_getname,
1839 .poll = datagram_poll, 1889 .poll = atalk_poll,
1840 .ioctl = atalk_ioctl, 1890 .ioctl = atalk_ioctl,
1841#ifdef CONFIG_COMPAT 1891#ifdef CONFIG_COMPAT
1842 .compat_ioctl = atalk_compat_ioctl, 1892 .compat_ioctl = atalk_compat_ioctl,
@@ -1851,8 +1901,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = {
1851 .sendpage = sock_no_sendpage, 1901 .sendpage = sock_no_sendpage,
1852}; 1902};
1853 1903
1854SOCKOPS_WRAP(atalk_dgram, PF_APPLETALK);
1855
1856static struct notifier_block ddp_notifier = { 1904static struct notifier_block ddp_notifier = {
1857 .notifier_call = ddp_device_event, 1905 .notifier_call = ddp_device_event,
1858}; 1906};
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 4da8892ced5f..2ea40995dced 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -191,8 +191,181 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
191} 191}
192 192
193#ifdef CONFIG_COMPAT 193#ifdef CONFIG_COMPAT
194int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 194/*
195 * FIXME:
196 * The compat_ioctl handling is duplicated, using both these conversion
197 * routines and the compat argument to the actual handlers. Both
198 * versions are somewhat incomplete and should be merged, e.g. by
199 * moving the ioctl number translation into the actual handlers and
200 * killing the conversion code.
201 *
202 * -arnd, November 2009
203 */
204#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc)
205#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf)
206#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc)
207#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc)
208#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc)
209#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc)
210#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc)
211#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc)
212#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc)
213#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc)
214#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc)
215#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc)
216#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc)
217#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc)
218#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc)
219#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc)
220#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc)
221
222static struct {
223 unsigned int cmd32;
224 unsigned int cmd;
225} atm_ioctl_map[] = {
226 { ATM_GETLINKRATE32, ATM_GETLINKRATE },
227 { ATM_GETNAMES32, ATM_GETNAMES },
228 { ATM_GETTYPE32, ATM_GETTYPE },
229 { ATM_GETESI32, ATM_GETESI },
230 { ATM_GETADDR32, ATM_GETADDR },
231 { ATM_RSTADDR32, ATM_RSTADDR },
232 { ATM_ADDADDR32, ATM_ADDADDR },
233 { ATM_DELADDR32, ATM_DELADDR },
234 { ATM_GETCIRANGE32, ATM_GETCIRANGE },
235 { ATM_SETCIRANGE32, ATM_SETCIRANGE },
236 { ATM_SETESI32, ATM_SETESI },
237 { ATM_SETESIF32, ATM_SETESIF },
238 { ATM_GETSTAT32, ATM_GETSTAT },
239 { ATM_GETSTATZ32, ATM_GETSTATZ },
240 { ATM_GETLOOP32, ATM_GETLOOP },
241 { ATM_SETLOOP32, ATM_SETLOOP },
242 { ATM_QUERYLOOP32, ATM_QUERYLOOP },
243};
244
245#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
246
247static int do_atm_iobuf(struct socket *sock, unsigned int cmd,
248 unsigned long arg)
249{
250 struct atm_iobuf __user *iobuf;
251 struct compat_atm_iobuf __user *iobuf32;
252 u32 data;
253 void __user *datap;
254 int len, err;
255
256 iobuf = compat_alloc_user_space(sizeof(*iobuf));
257 iobuf32 = compat_ptr(arg);
258
259 if (get_user(len, &iobuf32->length) ||
260 get_user(data, &iobuf32->buffer))
261 return -EFAULT;
262 datap = compat_ptr(data);
263 if (put_user(len, &iobuf->length) ||
264 put_user(datap, &iobuf->buffer))
265 return -EFAULT;
266
267 err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0);
268
269 if (!err) {
270 if (copy_in_user(&iobuf32->length, &iobuf->length,
271 sizeof(int)))
272 err = -EFAULT;
273 }
274
275 return err;
276}
277
278static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
279 unsigned long arg)
280{
281 struct atmif_sioc __user *sioc;
282 struct compat_atmif_sioc __user *sioc32;
283 u32 data;
284 void __user *datap;
285 int err;
286
287 sioc = compat_alloc_user_space(sizeof(*sioc));
288 sioc32 = compat_ptr(arg);
289
290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int))
291 || get_user(data, &sioc32->arg))
292 return -EFAULT;
293 datap = compat_ptr(data);
294 if (put_user(datap, &sioc->arg))
295 return -EFAULT;
296
297 err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0);
298
299 if (!err) {
300 if (copy_in_user(&sioc32->length, &sioc->length,
301 sizeof(int)))
302 err = -EFAULT;
303 }
304 return err;
305}
306
307static int do_atm_ioctl(struct socket *sock, unsigned int cmd32,
308 unsigned long arg)
309{
310 int i;
311 unsigned int cmd = 0;
312
313 switch (cmd32) {
314 case SONET_GETSTAT:
315 case SONET_GETSTATZ:
316 case SONET_GETDIAG:
317 case SONET_SETDIAG:
318 case SONET_CLRDIAG:
319 case SONET_SETFRAMING:
320 case SONET_GETFRAMING:
321 case SONET_GETFRSENSE:
322 return do_atmif_sioc(sock, cmd32, arg);
323 }
324
325 for (i = 0; i < NR_ATM_IOCTL; i++) {
326 if (cmd32 == atm_ioctl_map[i].cmd32) {
327 cmd = atm_ioctl_map[i].cmd;
328 break;
329 }
330 }
331 if (i == NR_ATM_IOCTL)
332 return -EINVAL;
333
334 switch (cmd) {
335 case ATM_GETNAMES:
336 return do_atm_iobuf(sock, cmd, arg);
337
338 case ATM_GETLINKRATE:
339 case ATM_GETTYPE:
340 case ATM_GETESI:
341 case ATM_GETADDR:
342 case ATM_RSTADDR:
343 case ATM_ADDADDR:
344 case ATM_DELADDR:
345 case ATM_GETCIRANGE:
346 case ATM_SETCIRANGE:
347 case ATM_SETESI:
348 case ATM_SETESIF:
349 case ATM_GETSTAT:
350 case ATM_GETSTATZ:
351 case ATM_GETLOOP:
352 case ATM_SETLOOP:
353 case ATM_QUERYLOOP:
354 return do_atmif_sioc(sock, cmd, arg);
355 }
356
357 return -EINVAL;
358}
359
360int vcc_compat_ioctl(struct socket *sock, unsigned int cmd,
361 unsigned long arg)
195{ 362{
196 return do_vcc_ioctl(sock, cmd, arg, 1); 363 int ret;
364
365 ret = do_vcc_ioctl(sock, cmd, arg, 1);
366 if (ret != -ENOIOCTLCMD)
367 return ret;
368
369 return do_atm_ioctl(sock, cmd, arg);
197} 370}
198#endif 371#endif
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index a6e1fdbae87f..8d74e62b0d79 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -127,7 +127,8 @@ static const struct proto_ops pvc_proto_ops = {
127}; 127};
128 128
129 129
130static int pvc_create(struct net *net, struct socket *sock,int protocol) 130static int pvc_create(struct net *net, struct socket *sock, int protocol,
131 int kern)
131{ 132{
132 if (net != &init_net) 133 if (net != &init_net)
133 return -EAFNOSUPPORT; 134 return -EAFNOSUPPORT;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 819354233318..66e1d9b3e5de 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -25,7 +25,7 @@
25#include "signaling.h" 25#include "signaling.h"
26#include "addr.h" 26#include "addr.h"
27 27
28static int svc_create(struct net *net, struct socket *sock,int protocol); 28static int svc_create(struct net *net, struct socket *sock, int protocol, int kern);
29 29
30/* 30/*
31 * Note: since all this is still nicely synchronized with the signaling demon, 31 * Note: since all this is still nicely synchronized with the signaling demon,
@@ -330,7 +330,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
330 330
331 lock_sock(sk); 331 lock_sock(sk);
332 332
333 error = svc_create(sock_net(sk), newsock,0); 333 error = svc_create(sock_net(sk), newsock, 0, 0);
334 if (error) 334 if (error)
335 goto out; 335 goto out;
336 336
@@ -650,11 +650,12 @@ static const struct proto_ops svc_proto_ops = {
650}; 650};
651 651
652 652
653static int svc_create(struct net *net, struct socket *sock,int protocol) 653static int svc_create(struct net *net, struct socket *sock, int protocol,
654 int kern)
654{ 655{
655 int error; 656 int error;
656 657
657 if (net != &init_net) 658 if (!net_eq(net, &init_net))
658 return -EAFNOSUPPORT; 659 return -EAFNOSUPPORT;
659 660
660 sock->ops = &svc_proto_ops; 661 sock->ops = &svc_proto_ops;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f05306f168fa..5588ba69c468 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -369,6 +369,9 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
369 if (ax25_ctl.digi_count > AX25_MAX_DIGIS) 369 if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
370 return -EINVAL; 370 return -EINVAL;
371 371
372 if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
373 return -EINVAL;
374
372 digi.ndigi = ax25_ctl.digi_count; 375 digi.ndigi = ax25_ctl.digi_count;
373 for (k = 0; k < digi.ndigi; k++) 376 for (k = 0; k < digi.ndigi; k++)
374 digi.calls[k] = ax25_ctl.digi_addr[k]; 377 digi.calls[k] = ax25_ctl.digi_addr[k];
@@ -418,14 +421,10 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
418 break; 421 break;
419 422
420 case AX25_T3: 423 case AX25_T3:
421 if (ax25_ctl.arg < 0)
422 goto einval_put;
423 ax25->t3 = ax25_ctl.arg * HZ; 424 ax25->t3 = ax25_ctl.arg * HZ;
424 break; 425 break;
425 426
426 case AX25_IDLE: 427 case AX25_IDLE:
427 if (ax25_ctl.arg < 0)
428 goto einval_put;
429 ax25->idle = ax25_ctl.arg * 60 * HZ; 428 ax25->idle = ax25_ctl.arg * 60 * HZ;
430 break; 429 break;
431 430
@@ -800,12 +799,13 @@ static struct proto ax25_proto = {
800 .obj_size = sizeof(struct sock), 799 .obj_size = sizeof(struct sock),
801}; 800};
802 801
803static int ax25_create(struct net *net, struct socket *sock, int protocol) 802static int ax25_create(struct net *net, struct socket *sock, int protocol,
803 int kern)
804{ 804{
805 struct sock *sk; 805 struct sock *sk;
806 ax25_cb *ax25; 806 ax25_cb *ax25;
807 807
808 if (net != &init_net) 808 if (!net_eq(net, &init_net))
809 return -EAFNOSUPPORT; 809 return -EAFNOSUPPORT;
810 810
811 switch (sock->type) { 811 switch (sock->type) {
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 399e59c9c6cb..087cc51f5927 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -126,7 +126,8 @@ int bt_sock_unregister(int proto)
126} 126}
127EXPORT_SYMBOL(bt_sock_unregister); 127EXPORT_SYMBOL(bt_sock_unregister);
128 128
129static int bt_sock_create(struct net *net, struct socket *sock, int proto) 129static int bt_sock_create(struct net *net, struct socket *sock, int proto,
130 int kern)
130{ 131{
131 int err; 132 int err;
132 133
@@ -144,7 +145,7 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto)
144 read_lock(&bt_proto_lock); 145 read_lock(&bt_proto_lock);
145 146
146 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { 147 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
147 err = bt_proto[proto]->create(net, sock, proto); 148 err = bt_proto[proto]->create(net, sock, proto, kern);
148 bt_sock_reclassify_lock(sock, proto); 149 bt_sock_reclassify_lock(sock, proto);
149 module_put(bt_proto[proto]->owner); 150 module_put(bt_proto[proto]->owner);
150 } 151 }
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 0a2c5460bb48..2ff6ac7b2ed4 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -195,7 +195,8 @@ static struct proto bnep_proto = {
195 .obj_size = sizeof(struct bt_sock) 195 .obj_size = sizeof(struct bt_sock)
196}; 196};
197 197
198static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) 198static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
199 int kern)
199{ 200{
200 struct sock *sk; 201 struct sock *sk;
201 202
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index de7c8040bc56..978cc3a718ad 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -190,7 +190,8 @@ static struct proto cmtp_proto = {
190 .obj_size = sizeof(struct bt_sock) 190 .obj_size = sizeof(struct bt_sock)
191}; 191};
192 192
193static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) 193static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
194 int kern)
194{ 195{
195 struct sock *sk; 196 struct sock *sk;
196 197
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a9750984f772..b7c4224f4e7d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
211 conn->type = type; 211 conn->type = type;
212 conn->mode = HCI_CM_ACTIVE; 212 conn->mode = HCI_CM_ACTIVE;
213 conn->state = BT_OPEN; 213 conn->state = BT_OPEN;
214 conn->auth_type = HCI_AT_GENERAL_BONDING;
214 215
215 conn->power_save = 1; 216 conn->power_save = 1;
216 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 217 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index e7395f231989..1ca5c7ca9bd4 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -621,7 +621,8 @@ static struct proto hci_sk_proto = {
621 .obj_size = sizeof(struct hci_pinfo) 621 .obj_size = sizeof(struct hci_pinfo)
622}; 622};
623 623
624static int hci_sock_create(struct net *net, struct socket *sock, int protocol) 624static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
625 int kern)
625{ 626{
626 struct sock *sk; 627 struct sock *sk;
627 628
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 4beb6a7a2953..9cfef68b9fec 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -241,7 +241,8 @@ static struct proto hidp_proto = {
241 .obj_size = sizeof(struct bt_sock) 241 .obj_size = sizeof(struct bt_sock)
242}; 242};
243 243
244static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) 244static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
245 int kern)
245{ 246{
246 struct sock *sk; 247 struct sock *sk;
247 248
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index d65101d92ee5..54992f782301 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -819,7 +819,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
819 return sk; 819 return sk;
820} 820}
821 821
822static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) 822static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
823 int kern)
823{ 824{
824 struct sock *sk; 825 struct sock *sk;
825 826
@@ -831,7 +832,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
831 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) 832 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
832 return -ESOCKTNOSUPPORT; 833 return -ESOCKTNOSUPPORT;
833 834
834 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW)) 835 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
835 return -EPERM; 836 return -EPERM;
836 837
837 sock->ops = &l2cap_sock_ops; 838 sock->ops = &l2cap_sock_ops;
@@ -1361,8 +1362,8 @@ static int l2cap_ertm_send(struct sock *sk)
1361 if (pi->conn_state & L2CAP_CONN_WAIT_F) 1362 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1362 return 0; 1363 return 0;
1363 1364
1364 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) 1365 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1365 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { 1366 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1366 tx_skb = skb_clone(skb, GFP_ATOMIC); 1367 tx_skb = skb_clone(skb, GFP_ATOMIC);
1367 1368
1368 if (pi->remote_max_tx && 1369 if (pi->remote_max_tx &&
@@ -1603,8 +1604,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1603 return -EOPNOTSUPP; 1604 return -EOPNOTSUPP;
1604 1605
1605 /* Check outgoing MTU */ 1606 /* Check outgoing MTU */
1606 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC 1607 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1607 && len > pi->omtu) 1608 len > pi->omtu)
1608 return -EINVAL; 1609 return -EINVAL;
1609 1610
1610 lock_sock(sk); 1611 lock_sock(sk);
@@ -2205,7 +2206,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2205{ 2206{
2206 struct l2cap_pinfo *pi = l2cap_pi(sk); 2207 struct l2cap_pinfo *pi = l2cap_pi(sk);
2207 struct l2cap_conf_req *req = data; 2208 struct l2cap_conf_req *req = data;
2208 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; 2209 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2209 void *ptr = req->data; 2210 void *ptr = req->data;
2210 2211
2211 BT_DBG("sk %p", sk); 2212 BT_DBG("sk %p", sk);
@@ -2394,6 +2395,10 @@ done:
2394 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 2395 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2395 2396
2396 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2397 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2398
2399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2400 sizeof(rfc), (unsigned long) &rfc);
2401
2397 break; 2402 break;
2398 2403
2399 case L2CAP_MODE_STREAMING: 2404 case L2CAP_MODE_STREAMING:
@@ -2401,6 +2406,10 @@ done:
2401 pi->max_pdu_size = rfc.max_pdu_size; 2406 pi->max_pdu_size = rfc.max_pdu_size;
2402 2407
2403 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2408 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2409
2410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2411 sizeof(rfc), (unsigned long) &rfc);
2412
2404 break; 2413 break;
2405 2414
2406 default: 2415 default:
@@ -2410,9 +2419,6 @@ done:
2410 rfc.mode = pi->mode; 2419 rfc.mode = pi->mode;
2411 } 2420 }
2412 2421
2413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2414 sizeof(rfc), (unsigned long) &rfc);
2415
2416 if (result == L2CAP_CONF_SUCCESS) 2422 if (result == L2CAP_CONF_SUCCESS)
2417 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2423 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2418 } 2424 }
@@ -2750,8 +2756,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2750 goto unlock; 2756 goto unlock;
2751 2757
2752 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 2758 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2753 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) 2759 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2754 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) 2760 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2755 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; 2761 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2756 2762
2757 sk->sk_state = BT_CONNECTED; 2763 sk->sk_state = BT_CONNECTED;
@@ -2839,8 +2845,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2839 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 2845 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2840 2846
2841 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2847 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2842 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) 2848 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2843 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) 2849 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2844 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; 2850 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2845 2851
2846 sk->sk_state = BT_CONNECTED; 2852 sk->sk_state = BT_CONNECTED;
@@ -3382,8 +3388,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
3382 pi->expected_ack_seq = tx_seq; 3388 pi->expected_ack_seq = tx_seq;
3383 l2cap_drop_acked_frames(sk); 3389 l2cap_drop_acked_frames(sk);
3384 3390
3385 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) 3391 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3386 && (pi->unacked_frames > 0)) 3392 (pi->unacked_frames > 0))
3387 __mod_retrans_timer(); 3393 __mod_retrans_timer();
3388 3394
3389 l2cap_ertm_send(sk); 3395 l2cap_ertm_send(sk);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d3bfc1b0afb1..4b5968dda673 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -323,7 +323,8 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
323 return sk; 323 return sk;
324} 324}
325 325
326static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol) 326static int rfcomm_sock_create(struct net *net, struct socket *sock,
327 int protocol, int kern)
327{ 328{
328 struct sock *sk; 329 struct sock *sk;
329 330
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 694a65541b73..dd8f6ec57dce 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -430,7 +430,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
430 return sk; 430 return sk;
431} 431}
432 432
433static int sco_sock_create(struct net *net, struct socket *sock, int protocol) 433static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
434 int kern)
434{ 435{
435 struct sock *sk; 436 struct sock *sk;
436 437
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 57bf05c353bc..3b8e038ab32c 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -60,8 +60,8 @@ static inline unsigned long hold_time(const struct net_bridge *br)
60static inline int has_expired(const struct net_bridge *br, 60static inline int has_expired(const struct net_bridge *br,
61 const struct net_bridge_fdb_entry *fdb) 61 const struct net_bridge_fdb_entry *fdb)
62{ 62{
63 return !fdb->is_static 63 return !fdb->is_static &&
64 && time_before_eq(fdb->ageing_timer + hold_time(br), jiffies); 64 time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
65} 65}
66 66
67static inline int br_mac_hash(const unsigned char *mac) 67static inline int br_mac_hash(const unsigned char *mac)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2117e5ba24c8..a2cbe61f6e65 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -377,15 +377,23 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
377 struct net_bridge_port *p; 377 struct net_bridge_port *p;
378 int err = 0; 378 int err = 0;
379 379
380 if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER) 380 /* Don't allow bridging non-ethernet like devices */
381 if ((dev->flags & IFF_LOOPBACK) ||
382 dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
381 return -EINVAL; 383 return -EINVAL;
382 384
385 /* No bridging of bridges */
383 if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) 386 if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
384 return -ELOOP; 387 return -ELOOP;
385 388
389 /* Device is already being bridged */
386 if (dev->br_port != NULL) 390 if (dev->br_port != NULL)
387 return -EBUSY; 391 return -EBUSY;
388 392
393 /* No bridging devices that dislike that (e.g. wireless) */
394 if (dev->priv_flags & IFF_DONT_BRIDGE)
395 return -EOPNOTSUPP;
396
389 p = new_nbp(br, dev); 397 p = new_nbp(br, dev);
390 if (IS_ERR(p)) 398 if (IS_ERR(p))
391 return PTR_ERR(p); 399 return PTR_ERR(p);
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 6a6433daaf27..2af6e4a90262 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -81,6 +81,7 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
81 return num; 81 return num;
82} 82}
83 83
84/* called with RTNL */
84static int add_del_if(struct net_bridge *br, int ifindex, int isadd) 85static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
85{ 86{
86 struct net_device *dev; 87 struct net_device *dev;
@@ -89,7 +90,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
89 if (!capable(CAP_NET_ADMIN)) 90 if (!capable(CAP_NET_ADMIN))
90 return -EPERM; 91 return -EPERM;
91 92
92 dev = dev_get_by_index(dev_net(br->dev), ifindex); 93 dev = __dev_get_by_index(dev_net(br->dev), ifindex);
93 if (dev == NULL) 94 if (dev == NULL)
94 return -EINVAL; 95 return -EINVAL;
95 96
@@ -98,7 +99,6 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
98 else 99 else
99 ret = br_del_if(br, dev); 100 ret = br_del_if(br, dev);
100 101
101 dev_put(dev);
102 return ret; 102 return ret;
103} 103}
104 104
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index ee4820aa1843..bee4f300d0c8 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -316,9 +316,9 @@ static ssize_t store_group_addr(struct device *d,
316 if (new_addr[5] & ~0xf) 316 if (new_addr[5] & ~0xf)
317 return -EINVAL; 317 return -EINVAL;
318 318
319 if (new_addr[5] == 1 /* 802.3x Pause address */ 319 if (new_addr[5] == 1 || /* 802.3x Pause address */
320 || new_addr[5] == 2 /* 802.3ad Slow protocols */ 320 new_addr[5] == 2 || /* 802.3ad Slow protocols */
321 || new_addr[5] == 3) /* 802.1X PAE address */ 321 new_addr[5] == 3) /* 802.1X PAE address */
322 return -EINVAL; 322 return -EINVAL;
323 323
324 spin_lock_bh(&br->lock); 324 spin_lock_bh(&br->lock);
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 48527e621626..75e29a9cebda 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -135,8 +135,8 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
135 if (memcmp(sp, header, sizeof(header))) 135 if (memcmp(sp, header, sizeof(header)))
136 return false; 136 return false;
137 137
138 if (info->bitmask & EBT_STP_TYPE 138 if (info->bitmask & EBT_STP_TYPE &&
139 && FWINV(info->type != sp->type, EBT_STP_TYPE)) 139 FWINV(info->type != sp->type, EBT_STP_TYPE))
140 return false; 140 return false;
141 141
142 if (sp->type == BPDU_TYPE_CONFIG && 142 if (sp->type == BPDU_TYPE_CONFIG &&
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3f2eb27e1ffb..51adc4c2b860 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -114,7 +114,8 @@ static void can_sock_destruct(struct sock *sk)
114 skb_queue_purge(&sk->sk_receive_queue); 114 skb_queue_purge(&sk->sk_receive_queue);
115} 115}
116 116
117static int can_create(struct net *net, struct socket *sock, int protocol) 117static int can_create(struct net *net, struct socket *sock, int protocol,
118 int kern)
118{ 119{
119 struct sock *sk; 120 struct sock *sk;
120 struct can_proto *cp; 121 struct can_proto *cp;
@@ -125,7 +126,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
125 if (protocol < 0 || protocol >= CAN_NPROTO) 126 if (protocol < 0 || protocol >= CAN_NPROTO)
126 return -EINVAL; 127 return -EINVAL;
127 128
128 if (net != &init_net) 129 if (!net_eq(net, &init_net))
129 return -EAFNOSUPPORT; 130 return -EAFNOSUPPORT;
130 131
131#ifdef CONFIG_MODULES 132#ifdef CONFIG_MODULES
@@ -160,11 +161,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
160 goto errout; 161 goto errout;
161 } 162 }
162 163
163 if (cp->capability >= 0 && !capable(cp->capability)) {
164 err = -EPERM;
165 goto errout;
166 }
167
168 sock->ops = cp->ops; 164 sock->ops = cp->ops;
169 165
170 sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot); 166 sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
@@ -379,8 +375,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
379 return &d->rx[RX_ALL]; 375 return &d->rx[RX_ALL];
380 376
381 /* extra filterlists for the subscription of a single non-RTR can_id */ 377 /* extra filterlists for the subscription of a single non-RTR can_id */
382 if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) 378 if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
383 && !(*can_id & CAN_RTR_FLAG)) { 379 !(*can_id & CAN_RTR_FLAG)) {
384 380
385 if (*can_id & CAN_EFF_FLAG) { 381 if (*can_id & CAN_EFF_FLAG) {
386 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { 382 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
@@ -529,8 +525,8 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
529 */ 525 */
530 526
531 hlist_for_each_entry_rcu(r, next, rl, list) { 527 hlist_for_each_entry_rcu(r, next, rl, list) {
532 if (r->can_id == can_id && r->mask == mask 528 if (r->can_id == can_id && r->mask == mask &&
533 && r->func == func && r->data == data) 529 r->func == func && r->data == data)
534 break; 530 break;
535 } 531 }
536 532
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 2f47039c79dd..e32af52238a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -132,23 +132,27 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
132/* 132/*
133 * procfs functions 133 * procfs functions
134 */ 134 */
135static char *bcm_proc_getifname(int ifindex) 135static char *bcm_proc_getifname(char *result, int ifindex)
136{ 136{
137 struct net_device *dev; 137 struct net_device *dev;
138 138
139 if (!ifindex) 139 if (!ifindex)
140 return "any"; 140 return "any";
141 141
142 /* no usage counting */ 142 rcu_read_lock();
143 dev = __dev_get_by_index(&init_net, ifindex); 143 dev = dev_get_by_index_rcu(&init_net, ifindex);
144 if (dev) 144 if (dev)
145 return dev->name; 145 strcpy(result, dev->name);
146 else
147 strcpy(result, "???");
148 rcu_read_unlock();
146 149
147 return "???"; 150 return result;
148} 151}
149 152
150static int bcm_proc_show(struct seq_file *m, void *v) 153static int bcm_proc_show(struct seq_file *m, void *v)
151{ 154{
155 char ifname[IFNAMSIZ];
152 struct sock *sk = (struct sock *)m->private; 156 struct sock *sk = (struct sock *)m->private;
153 struct bcm_sock *bo = bcm_sk(sk); 157 struct bcm_sock *bo = bcm_sk(sk);
154 struct bcm_op *op; 158 struct bcm_op *op;
@@ -157,7 +161,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
157 seq_printf(m, " / sk %p", sk); 161 seq_printf(m, " / sk %p", sk);
158 seq_printf(m, " / bo %p", bo); 162 seq_printf(m, " / bo %p", bo);
159 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); 163 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
160 seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex)); 164 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
161 seq_printf(m, " <<<\n"); 165 seq_printf(m, " <<<\n");
162 166
163 list_for_each_entry(op, &bo->rx_ops, list) { 167 list_for_each_entry(op, &bo->rx_ops, list) {
@@ -169,7 +173,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
169 continue; 173 continue;
170 174
171 seq_printf(m, "rx_op: %03X %-5s ", 175 seq_printf(m, "rx_op: %03X %-5s ",
172 op->can_id, bcm_proc_getifname(op->ifindex)); 176 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
173 seq_printf(m, "[%d]%c ", op->nframes, 177 seq_printf(m, "[%d]%c ", op->nframes,
174 (op->flags & RX_CHECK_DLC)?'d':' '); 178 (op->flags & RX_CHECK_DLC)?'d':' ');
175 if (op->kt_ival1.tv64) 179 if (op->kt_ival1.tv64)
@@ -194,7 +198,8 @@ static int bcm_proc_show(struct seq_file *m, void *v)
194 list_for_each_entry(op, &bo->tx_ops, list) { 198 list_for_each_entry(op, &bo->tx_ops, list) {
195 199
196 seq_printf(m, "tx_op: %03X %s [%d] ", 200 seq_printf(m, "tx_op: %03X %s [%d] ",
197 op->can_id, bcm_proc_getifname(op->ifindex), 201 op->can_id,
202 bcm_proc_getifname(ifname, op->ifindex),
198 op->nframes); 203 op->nframes);
199 204
200 if (op->kt_ival1.tv64) 205 if (op->kt_ival1.tv64)
@@ -1576,7 +1581,6 @@ static struct proto bcm_proto __read_mostly = {
1576static struct can_proto bcm_can_proto __read_mostly = { 1581static struct can_proto bcm_can_proto __read_mostly = {
1577 .type = SOCK_DGRAM, 1582 .type = SOCK_DGRAM,
1578 .protocol = CAN_BCM, 1583 .protocol = CAN_BCM,
1579 .capability = -1,
1580 .ops = &bcm_ops, 1584 .ops = &bcm_ops,
1581 .prot = &bcm_proto, 1585 .prot = &bcm_proto,
1582}; 1586};
diff --git a/net/can/raw.c b/net/can/raw.c
index 6e77db58b9e6..abca920440b5 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -742,7 +742,6 @@ static struct proto raw_proto __read_mostly = {
742static struct can_proto raw_can_proto __read_mostly = { 742static struct can_proto raw_can_proto __read_mostly = {
743 .type = SOCK_RAW, 743 .type = SOCK_RAW,
744 .protocol = CAN_RAW, 744 .protocol = CAN_RAW,
745 .capability = -1,
746 .ops = &raw_ops, 745 .ops = &raw_ops,
747 .prot = &raw_proto, 746 .prot = &raw_proto,
748}; 747};
diff --git a/net/compat.c b/net/compat.c
index 6a2f75fb3f45..e1a56ade803b 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -758,9 +758,13 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
758{ 758{
759 int datagrams; 759 int datagrams;
760 struct timespec ktspec; 760 struct timespec ktspec;
761 struct compat_timespec __user *utspec = 761 struct compat_timespec __user *utspec;
762 (struct compat_timespec __user *)timeout;
763 762
763 if (timeout == NULL)
764 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
765 flags | MSG_CMSG_COMPAT, NULL);
766
767 utspec = (struct compat_timespec __user *)timeout;
764 if (get_user(ktspec.tv_sec, &utspec->tv_sec) || 768 if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
765 get_user(ktspec.tv_nsec, &utspec->tv_nsec)) 769 get_user(ktspec.tv_nsec, &utspec->tv_nsec))
766 return -EFAULT; 770 return -EFAULT;
diff --git a/net/core/Makefile b/net/core/Makefile
index 796f46eece5f..08791ac3e05a 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,7 +6,6 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
6 gen_stats.o gen_estimator.o net_namespace.o 6 gen_stats.o gen_estimator.o net_namespace.o
7 7
8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
10 9
11obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 10obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
12 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4d57f5e12b05..95c2e0840d0d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -224,6 +224,15 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
224 consume_skb(skb); 224 consume_skb(skb);
225 sk_mem_reclaim_partial(sk); 225 sk_mem_reclaim_partial(sk);
226} 226}
227EXPORT_SYMBOL(skb_free_datagram);
228
229void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
230{
231 lock_sock(sk);
232 skb_free_datagram(sk, skb);
233 release_sock(sk);
234}
235EXPORT_SYMBOL(skb_free_datagram_locked);
227 236
228/** 237/**
229 * skb_kill_datagram - Free a datagram skbuff forcibly 238 * skb_kill_datagram - Free a datagram skbuff forcibly
@@ -753,5 +762,4 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
753EXPORT_SYMBOL(datagram_poll); 762EXPORT_SYMBOL(datagram_poll);
754EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); 763EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
755EXPORT_SYMBOL(skb_copy_datagram_iovec); 764EXPORT_SYMBOL(skb_copy_datagram_iovec);
756EXPORT_SYMBOL(skb_free_datagram);
757EXPORT_SYMBOL(skb_recv_datagram); 765EXPORT_SYMBOL(skb_recv_datagram);
diff --git a/net/core/dev.c b/net/core/dev.c
index 68a1bb68b5a8..0913a08a87d6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -79,6 +79,7 @@
79#include <linux/cpu.h> 79#include <linux/cpu.h>
80#include <linux/types.h> 80#include <linux/types.h>
81#include <linux/kernel.h> 81#include <linux/kernel.h>
82#include <linux/hash.h>
82#include <linux/sched.h> 83#include <linux/sched.h>
83#include <linux/mutex.h> 84#include <linux/mutex.h>
84#include <linux/string.h> 85#include <linux/string.h>
@@ -104,6 +105,7 @@
104#include <net/dst.h> 105#include <net/dst.h>
105#include <net/pkt_sched.h> 106#include <net/pkt_sched.h>
106#include <net/checksum.h> 107#include <net/checksum.h>
108#include <net/xfrm.h>
107#include <linux/highmem.h> 109#include <linux/highmem.h>
108#include <linux/init.h> 110#include <linux/init.h>
109#include <linux/kmod.h> 111#include <linux/kmod.h>
@@ -175,7 +177,7 @@ static struct list_head ptype_all __read_mostly; /* Taps */
175 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 177 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
176 * semaphore. 178 * semaphore.
177 * 179 *
178 * Pure readers hold dev_base_lock for reading. 180 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
179 * 181 *
180 * Writers must hold the rtnl semaphore while they loop through the 182 * Writers must hold the rtnl semaphore while they loop through the
181 * dev_base_head list, and hold dev_base_lock for writing when they do the 183 * dev_base_head list, and hold dev_base_lock for writing when they do the
@@ -196,7 +198,7 @@ EXPORT_SYMBOL(dev_base_lock);
196static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 198static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
197{ 199{
198 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); 200 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
199 return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)]; 201 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
200} 202}
201 203
202static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 204static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
@@ -212,8 +214,8 @@ static int list_netdevice(struct net_device *dev)
212 ASSERT_RTNL(); 214 ASSERT_RTNL();
213 215
214 write_lock_bh(&dev_base_lock); 216 write_lock_bh(&dev_base_lock);
215 list_add_tail(&dev->dev_list, &net->dev_base_head); 217 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
216 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); 218 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
217 hlist_add_head_rcu(&dev->index_hlist, 219 hlist_add_head_rcu(&dev->index_hlist,
218 dev_index_hash(net, dev->ifindex)); 220 dev_index_hash(net, dev->ifindex));
219 write_unlock_bh(&dev_base_lock); 221 write_unlock_bh(&dev_base_lock);
@@ -229,8 +231,8 @@ static void unlist_netdevice(struct net_device *dev)
229 231
230 /* Unlink dev from the device chain */ 232 /* Unlink dev from the device chain */
231 write_lock_bh(&dev_base_lock); 233 write_lock_bh(&dev_base_lock);
232 list_del(&dev->dev_list); 234 list_del_rcu(&dev->dev_list);
233 hlist_del(&dev->name_hlist); 235 hlist_del_rcu(&dev->name_hlist);
234 hlist_del_rcu(&dev->index_hlist); 236 hlist_del_rcu(&dev->index_hlist);
235 write_unlock_bh(&dev_base_lock); 237 write_unlock_bh(&dev_base_lock);
236} 238}
@@ -587,18 +589,44 @@ __setup("netdev=", netdev_boot_setup);
587struct net_device *__dev_get_by_name(struct net *net, const char *name) 589struct net_device *__dev_get_by_name(struct net *net, const char *name)
588{ 590{
589 struct hlist_node *p; 591 struct hlist_node *p;
592 struct net_device *dev;
593 struct hlist_head *head = dev_name_hash(net, name);
590 594
591 hlist_for_each(p, dev_name_hash(net, name)) { 595 hlist_for_each_entry(dev, p, head, name_hlist)
592 struct net_device *dev
593 = hlist_entry(p, struct net_device, name_hlist);
594 if (!strncmp(dev->name, name, IFNAMSIZ)) 596 if (!strncmp(dev->name, name, IFNAMSIZ))
595 return dev; 597 return dev;
596 } 598
597 return NULL; 599 return NULL;
598} 600}
599EXPORT_SYMBOL(__dev_get_by_name); 601EXPORT_SYMBOL(__dev_get_by_name);
600 602
601/** 603/**
604 * dev_get_by_name_rcu - find a device by its name
605 * @net: the applicable net namespace
606 * @name: name to find
607 *
608 * Find an interface by name.
609 * If the name is found a pointer to the device is returned.
610 * If the name is not found then %NULL is returned.
611 * The reference counters are not incremented so the caller must be
612 * careful with locks. The caller must hold RCU lock.
613 */
614
615struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
616{
617 struct hlist_node *p;
618 struct net_device *dev;
619 struct hlist_head *head = dev_name_hash(net, name);
620
621 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
622 if (!strncmp(dev->name, name, IFNAMSIZ))
623 return dev;
624
625 return NULL;
626}
627EXPORT_SYMBOL(dev_get_by_name_rcu);
628
629/**
602 * dev_get_by_name - find a device by its name 630 * dev_get_by_name - find a device by its name
603 * @net: the applicable net namespace 631 * @net: the applicable net namespace
604 * @name: name to find 632 * @name: name to find
@@ -614,11 +642,11 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
614{ 642{
615 struct net_device *dev; 643 struct net_device *dev;
616 644
617 read_lock(&dev_base_lock); 645 rcu_read_lock();
618 dev = __dev_get_by_name(net, name); 646 dev = dev_get_by_name_rcu(net, name);
619 if (dev) 647 if (dev)
620 dev_hold(dev); 648 dev_hold(dev);
621 read_unlock(&dev_base_lock); 649 rcu_read_unlock();
622 return dev; 650 return dev;
623} 651}
624EXPORT_SYMBOL(dev_get_by_name); 652EXPORT_SYMBOL(dev_get_by_name);
@@ -638,13 +666,13 @@ EXPORT_SYMBOL(dev_get_by_name);
638struct net_device *__dev_get_by_index(struct net *net, int ifindex) 666struct net_device *__dev_get_by_index(struct net *net, int ifindex)
639{ 667{
640 struct hlist_node *p; 668 struct hlist_node *p;
669 struct net_device *dev;
670 struct hlist_head *head = dev_index_hash(net, ifindex);
641 671
642 hlist_for_each(p, dev_index_hash(net, ifindex)) { 672 hlist_for_each_entry(dev, p, head, index_hlist)
643 struct net_device *dev
644 = hlist_entry(p, struct net_device, index_hlist);
645 if (dev->ifindex == ifindex) 673 if (dev->ifindex == ifindex)
646 return dev; 674 return dev;
647 } 675
648 return NULL; 676 return NULL;
649} 677}
650EXPORT_SYMBOL(__dev_get_by_index); 678EXPORT_SYMBOL(__dev_get_by_index);
@@ -773,15 +801,15 @@ struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
773 struct net_device *dev, *ret; 801 struct net_device *dev, *ret;
774 802
775 ret = NULL; 803 ret = NULL;
776 read_lock(&dev_base_lock); 804 rcu_read_lock();
777 for_each_netdev(net, dev) { 805 for_each_netdev_rcu(net, dev) {
778 if (((dev->flags ^ if_flags) & mask) == 0) { 806 if (((dev->flags ^ if_flags) & mask) == 0) {
779 dev_hold(dev); 807 dev_hold(dev);
780 ret = dev; 808 ret = dev;
781 break; 809 break;
782 } 810 }
783 } 811 }
784 read_unlock(&dev_base_lock); 812 rcu_read_unlock();
785 return ret; 813 return ret;
786} 814}
787EXPORT_SYMBOL(dev_get_by_flags); 815EXPORT_SYMBOL(dev_get_by_flags);
@@ -866,7 +894,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
866 free_page((unsigned long) inuse); 894 free_page((unsigned long) inuse);
867 } 895 }
868 896
869 snprintf(buf, IFNAMSIZ, name, i); 897 if (buf != name)
898 snprintf(buf, IFNAMSIZ, name, i);
870 if (!__dev_get_by_name(net, buf)) 899 if (!__dev_get_by_name(net, buf))
871 return i; 900 return i;
872 901
@@ -906,6 +935,21 @@ int dev_alloc_name(struct net_device *dev, const char *name)
906} 935}
907EXPORT_SYMBOL(dev_alloc_name); 936EXPORT_SYMBOL(dev_alloc_name);
908 937
938static int dev_get_valid_name(struct net *net, const char *name, char *buf,
939 bool fmt)
940{
941 if (!dev_valid_name(name))
942 return -EINVAL;
943
944 if (fmt && strchr(name, '%'))
945 return __dev_alloc_name(net, name, buf);
946 else if (__dev_get_by_name(net, name))
947 return -EEXIST;
948 else if (buf != name)
949 strlcpy(buf, name, IFNAMSIZ);
950
951 return 0;
952}
909 953
910/** 954/**
911 * dev_change_name - change name of a device 955 * dev_change_name - change name of a device
@@ -929,28 +973,20 @@ int dev_change_name(struct net_device *dev, const char *newname)
929 if (dev->flags & IFF_UP) 973 if (dev->flags & IFF_UP)
930 return -EBUSY; 974 return -EBUSY;
931 975
932 if (!dev_valid_name(newname))
933 return -EINVAL;
934
935 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) 976 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
936 return 0; 977 return 0;
937 978
938 memcpy(oldname, dev->name, IFNAMSIZ); 979 memcpy(oldname, dev->name, IFNAMSIZ);
939 980
940 if (strchr(newname, '%')) { 981 err = dev_get_valid_name(net, newname, dev->name, 1);
941 err = dev_alloc_name(dev, newname); 982 if (err < 0)
942 if (err < 0) 983 return err;
943 return err;
944 } else if (__dev_get_by_name(net, newname))
945 return -EEXIST;
946 else
947 strlcpy(dev->name, newname, IFNAMSIZ);
948 984
949rollback: 985rollback:
950 /* For now only devices in the initial network namespace 986 /* For now only devices in the initial network namespace
951 * are in sysfs. 987 * are in sysfs.
952 */ 988 */
953 if (net == &init_net) { 989 if (net_eq(net, &init_net)) {
954 ret = device_rename(&dev->dev, dev->name); 990 ret = device_rename(&dev->dev, dev->name);
955 if (ret) { 991 if (ret) {
956 memcpy(dev->name, oldname, IFNAMSIZ); 992 memcpy(dev->name, oldname, IFNAMSIZ);
@@ -960,21 +996,27 @@ rollback:
960 996
961 write_lock_bh(&dev_base_lock); 997 write_lock_bh(&dev_base_lock);
962 hlist_del(&dev->name_hlist); 998 hlist_del(&dev->name_hlist);
963 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); 999 write_unlock_bh(&dev_base_lock);
1000
1001 synchronize_rcu();
1002
1003 write_lock_bh(&dev_base_lock);
1004 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
964 write_unlock_bh(&dev_base_lock); 1005 write_unlock_bh(&dev_base_lock);
965 1006
966 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1007 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
967 ret = notifier_to_errno(ret); 1008 ret = notifier_to_errno(ret);
968 1009
969 if (ret) { 1010 if (ret) {
970 if (err) { 1011 /* err >= 0 after dev_alloc_name() or stores the first errno */
971 printk(KERN_ERR 1012 if (err >= 0) {
972 "%s: name change rollback failed: %d.\n",
973 dev->name, ret);
974 } else {
975 err = ret; 1013 err = ret;
976 memcpy(dev->name, oldname, IFNAMSIZ); 1014 memcpy(dev->name, oldname, IFNAMSIZ);
977 goto rollback; 1015 goto rollback;
1016 } else {
1017 printk(KERN_ERR
1018 "%s: name change rollback failed: %d.\n",
1019 dev->name, ret);
978 } 1020 }
979 } 1021 }
980 1022
@@ -1062,9 +1104,9 @@ void dev_load(struct net *net, const char *name)
1062{ 1104{
1063 struct net_device *dev; 1105 struct net_device *dev;
1064 1106
1065 read_lock(&dev_base_lock); 1107 rcu_read_lock();
1066 dev = __dev_get_by_name(net, name); 1108 dev = dev_get_by_name_rcu(net, name);
1067 read_unlock(&dev_base_lock); 1109 rcu_read_unlock();
1068 1110
1069 if (!dev && capable(CAP_NET_ADMIN)) 1111 if (!dev && capable(CAP_NET_ADMIN))
1070 request_module("%s", name); 1112 request_module("%s", name);
@@ -1311,6 +1353,7 @@ rollback:
1311 nb->notifier_call(nb, NETDEV_DOWN, dev); 1353 nb->notifier_call(nb, NETDEV_DOWN, dev);
1312 } 1354 }
1313 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1355 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1356 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1314 } 1357 }
1315 } 1358 }
1316 1359
@@ -1377,6 +1420,45 @@ static inline void net_timestamp(struct sk_buff *skb)
1377 skb->tstamp.tv64 = 0; 1420 skb->tstamp.tv64 = 0;
1378} 1421}
1379 1422
1423/**
1424 * dev_forward_skb - loopback an skb to another netif
1425 *
1426 * @dev: destination network device
1427 * @skb: buffer to forward
1428 *
1429 * return values:
1430 * NET_RX_SUCCESS (no congestion)
1431 * NET_RX_DROP (packet was dropped)
1432 *
1433 * dev_forward_skb can be used for injecting an skb from the
1434 * start_xmit function of one device into the receive queue
1435 * of another device.
1436 *
1437 * The receiving device may be in another namespace, so
1438 * we have to clear all information in the skb that could
1439 * impact namespace isolation.
1440 */
1441int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1442{
1443 skb_orphan(skb);
1444
1445 if (!(dev->flags & IFF_UP))
1446 return NET_RX_DROP;
1447
1448 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP;
1450
1451 skb_dst_drop(skb);
1452 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb);
1459}
1460EXPORT_SYMBOL_GPL(dev_forward_skb);
1461
1380/* 1462/*
1381 * Support routine. Sends outgoing frames to any network 1463 * Support routine. Sends outgoing frames to any network
1382 * taps currently in use. 1464 * taps currently in use.
@@ -1725,7 +1807,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1725 struct netdev_queue *txq) 1807 struct netdev_queue *txq)
1726{ 1808{
1727 const struct net_device_ops *ops = dev->netdev_ops; 1809 const struct net_device_ops *ops = dev->netdev_ops;
1728 int rc; 1810 int rc = NETDEV_TX_OK;
1729 1811
1730 if (likely(!skb->next)) { 1812 if (likely(!skb->next)) {
1731 if (!list_empty(&ptype_all)) 1813 if (!list_empty(&ptype_all))
@@ -1773,6 +1855,8 @@ gso:
1773 nskb->next = NULL; 1855 nskb->next = NULL;
1774 rc = ops->ndo_start_xmit(nskb, dev); 1856 rc = ops->ndo_start_xmit(nskb, dev);
1775 if (unlikely(rc != NETDEV_TX_OK)) { 1857 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK)
1859 goto out_kfree_gso_skb;
1776 nskb->next = skb->next; 1860 nskb->next = skb->next;
1777 skb->next = nskb; 1861 skb->next = nskb;
1778 return rc; 1862 return rc;
@@ -1782,11 +1866,12 @@ gso:
1782 return NETDEV_TX_BUSY; 1866 return NETDEV_TX_BUSY;
1783 } while (skb->next); 1867 } while (skb->next);
1784 1868
1785 skb->destructor = DEV_GSO_CB(skb)->destructor; 1869out_kfree_gso_skb:
1786 1870 if (likely(skb->next == NULL))
1871 skb->destructor = DEV_GSO_CB(skb)->destructor;
1787out_kfree_skb: 1872out_kfree_skb:
1788 kfree_skb(skb); 1873 kfree_skb(skb);
1789 return NETDEV_TX_OK; 1874 return rc;
1790} 1875}
1791 1876
1792static u32 skb_tx_hashrnd; 1877static u32 skb_tx_hashrnd;
@@ -1813,6 +1898,20 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1813} 1898}
1814EXPORT_SYMBOL(skb_tx_hash); 1899EXPORT_SYMBOL(skb_tx_hash);
1815 1900
1901static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1902{
1903 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1904 if (net_ratelimit()) {
1905 WARN(1, "%s selects TX queue %d, but "
1906 "real number of TX queues is %d\n",
1907 dev->name, queue_index,
1908 dev->real_num_tx_queues);
1909 }
1910 return 0;
1911 }
1912 return queue_index;
1913}
1914
1816static struct netdev_queue *dev_pick_tx(struct net_device *dev, 1915static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1817 struct sk_buff *skb) 1916 struct sk_buff *skb)
1818{ 1917{
@@ -1826,6 +1925,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1826 1925
1827 if (ops->ndo_select_queue) { 1926 if (ops->ndo_select_queue) {
1828 queue_index = ops->ndo_select_queue(dev, skb); 1927 queue_index = ops->ndo_select_queue(dev, skb);
1928 queue_index = dev_cap_txqueue(dev, queue_index);
1829 } else { 1929 } else {
1830 queue_index = 0; 1930 queue_index = 0;
1831 if (dev->real_num_tx_queues > 1) 1931 if (dev->real_num_tx_queues > 1)
@@ -1971,8 +2071,8 @@ gso:
1971 HARD_TX_LOCK(dev, txq, cpu); 2071 HARD_TX_LOCK(dev, txq, cpu);
1972 2072
1973 if (!netif_tx_queue_stopped(txq)) { 2073 if (!netif_tx_queue_stopped(txq)) {
1974 rc = NET_XMIT_SUCCESS; 2074 rc = dev_hard_start_xmit(skb, dev, txq);
1975 if (!dev_hard_start_xmit(skb, dev, txq)) { 2075 if (dev_xmit_complete(rc)) {
1976 HARD_TX_UNLOCK(dev, txq); 2076 HARD_TX_UNLOCK(dev, txq);
1977 goto out; 2077 goto out;
1978 } 2078 }
@@ -2227,7 +2327,7 @@ static int ing_filter(struct sk_buff *skb)
2227 if (MAX_RED_LOOP < ttl++) { 2327 if (MAX_RED_LOOP < ttl++) {
2228 printk(KERN_WARNING 2328 printk(KERN_WARNING
2229 "Redir loop detected Dropping packet (%d->%d)\n", 2329 "Redir loop detected Dropping packet (%d->%d)\n",
2230 skb->iif, dev->ifindex); 2330 skb->skb_iif, dev->ifindex);
2231 return TC_ACT_SHOT; 2331 return TC_ACT_SHOT;
2232 } 2332 }
2233 2333
@@ -2335,8 +2435,8 @@ int netif_receive_skb(struct sk_buff *skb)
2335 if (netpoll_receive_skb(skb)) 2435 if (netpoll_receive_skb(skb))
2336 return NET_RX_DROP; 2436 return NET_RX_DROP;
2337 2437
2338 if (!skb->iif) 2438 if (!skb->skb_iif)
2339 skb->iif = skb->dev->ifindex; 2439 skb->skb_iif = skb->dev->ifindex;
2340 2440
2341 null_or_orig = NULL; 2441 null_or_orig = NULL;
2342 orig_dev = skb->dev; 2442 orig_dev = skb->dev;
@@ -2476,7 +2576,7 @@ void napi_gro_flush(struct napi_struct *napi)
2476} 2576}
2477EXPORT_SYMBOL(napi_gro_flush); 2577EXPORT_SYMBOL(napi_gro_flush);
2478 2578
2479int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2480{ 2580{
2481 struct sk_buff **pp = NULL; 2581 struct sk_buff **pp = NULL;
2482 struct packet_type *ptype; 2582 struct packet_type *ptype;
@@ -2484,7 +2584,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2484 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 2584 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2485 int same_flow; 2585 int same_flow;
2486 int mac_len; 2586 int mac_len;
2487 int ret; 2587 enum gro_result ret;
2488 2588
2489 if (!(skb->dev->features & NETIF_F_GRO)) 2589 if (!(skb->dev->features & NETIF_F_GRO))
2490 goto normal; 2590 goto normal;
@@ -2568,7 +2668,8 @@ normal:
2568} 2668}
2569EXPORT_SYMBOL(dev_gro_receive); 2669EXPORT_SYMBOL(dev_gro_receive);
2570 2670
2571static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2671static gro_result_t
2672__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2572{ 2673{
2573 struct sk_buff *p; 2674 struct sk_buff *p;
2574 2675
@@ -2576,33 +2677,35 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2576 return GRO_NORMAL; 2677 return GRO_NORMAL;
2577 2678
2578 for (p = napi->gro_list; p; p = p->next) { 2679 for (p = napi->gro_list; p; p = p->next) {
2579 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev) 2680 NAPI_GRO_CB(p)->same_flow =
2580 && !compare_ether_header(skb_mac_header(p), 2681 (p->dev == skb->dev) &&
2581 skb_gro_mac_header(skb)); 2682 !compare_ether_header(skb_mac_header(p),
2683 skb_gro_mac_header(skb));
2582 NAPI_GRO_CB(p)->flush = 0; 2684 NAPI_GRO_CB(p)->flush = 0;
2583 } 2685 }
2584 2686
2585 return dev_gro_receive(napi, skb); 2687 return dev_gro_receive(napi, skb);
2586} 2688}
2587 2689
2588int napi_skb_finish(int ret, struct sk_buff *skb) 2690gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2589{ 2691{
2590 int err = NET_RX_SUCCESS;
2591
2592 switch (ret) { 2692 switch (ret) {
2593 case GRO_NORMAL: 2693 case GRO_NORMAL:
2594 return netif_receive_skb(skb); 2694 if (netif_receive_skb(skb))
2695 ret = GRO_DROP;
2696 break;
2595 2697
2596 case GRO_DROP: 2698 case GRO_DROP:
2597 err = NET_RX_DROP;
2598 /* fall through */
2599
2600 case GRO_MERGED_FREE: 2699 case GRO_MERGED_FREE:
2601 kfree_skb(skb); 2700 kfree_skb(skb);
2602 break; 2701 break;
2702
2703 case GRO_HELD:
2704 case GRO_MERGED:
2705 break;
2603 } 2706 }
2604 2707
2605 return err; 2708 return ret;
2606} 2709}
2607EXPORT_SYMBOL(napi_skb_finish); 2710EXPORT_SYMBOL(napi_skb_finish);
2608 2711
@@ -2622,7 +2725,7 @@ void skb_gro_reset_offset(struct sk_buff *skb)
2622} 2725}
2623EXPORT_SYMBOL(skb_gro_reset_offset); 2726EXPORT_SYMBOL(skb_gro_reset_offset);
2624 2727
2625int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2728gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2626{ 2729{
2627 skb_gro_reset_offset(skb); 2730 skb_gro_reset_offset(skb);
2628 2731
@@ -2652,31 +2755,30 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
2652} 2755}
2653EXPORT_SYMBOL(napi_get_frags); 2756EXPORT_SYMBOL(napi_get_frags);
2654 2757
2655int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret) 2758gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2759 gro_result_t ret)
2656{ 2760{
2657 int err = NET_RX_SUCCESS;
2658
2659 switch (ret) { 2761 switch (ret) {
2660 case GRO_NORMAL: 2762 case GRO_NORMAL:
2661 case GRO_HELD: 2763 case GRO_HELD:
2662 skb->protocol = eth_type_trans(skb, napi->dev); 2764 skb->protocol = eth_type_trans(skb, napi->dev);
2663 2765
2664 if (ret == GRO_NORMAL) 2766 if (ret == GRO_HELD)
2665 return netif_receive_skb(skb); 2767 skb_gro_pull(skb, -ETH_HLEN);
2666 2768 else if (netif_receive_skb(skb))
2667 skb_gro_pull(skb, -ETH_HLEN); 2769 ret = GRO_DROP;
2668 break; 2770 break;
2669 2771
2670 case GRO_DROP: 2772 case GRO_DROP:
2671 err = NET_RX_DROP;
2672 /* fall through */
2673
2674 case GRO_MERGED_FREE: 2773 case GRO_MERGED_FREE:
2675 napi_reuse_skb(napi, skb); 2774 napi_reuse_skb(napi, skb);
2676 break; 2775 break;
2776
2777 case GRO_MERGED:
2778 break;
2677 } 2779 }
2678 2780
2679 return err; 2781 return ret;
2680} 2782}
2681EXPORT_SYMBOL(napi_frags_finish); 2783EXPORT_SYMBOL(napi_frags_finish);
2682 2784
@@ -2717,12 +2819,12 @@ out:
2717} 2819}
2718EXPORT_SYMBOL(napi_frags_skb); 2820EXPORT_SYMBOL(napi_frags_skb);
2719 2821
2720int napi_gro_frags(struct napi_struct *napi) 2822gro_result_t napi_gro_frags(struct napi_struct *napi)
2721{ 2823{
2722 struct sk_buff *skb = napi_frags_skb(napi); 2824 struct sk_buff *skb = napi_frags_skb(napi);
2723 2825
2724 if (!skb) 2826 if (!skb)
2725 return NET_RX_DROP; 2827 return GRO_DROP;
2726 2828
2727 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 2829 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2728} 2830}
@@ -3045,18 +3147,18 @@ static int dev_ifconf(struct net *net, char __user *arg)
3045 * in detail. 3147 * in detail.
3046 */ 3148 */
3047void *dev_seq_start(struct seq_file *seq, loff_t *pos) 3149void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3048 __acquires(dev_base_lock) 3150 __acquires(RCU)
3049{ 3151{
3050 struct net *net = seq_file_net(seq); 3152 struct net *net = seq_file_net(seq);
3051 loff_t off; 3153 loff_t off;
3052 struct net_device *dev; 3154 struct net_device *dev;
3053 3155
3054 read_lock(&dev_base_lock); 3156 rcu_read_lock();
3055 if (!*pos) 3157 if (!*pos)
3056 return SEQ_START_TOKEN; 3158 return SEQ_START_TOKEN;
3057 3159
3058 off = 1; 3160 off = 1;
3059 for_each_netdev(net, dev) 3161 for_each_netdev_rcu(net, dev)
3060 if (off++ == *pos) 3162 if (off++ == *pos)
3061 return dev; 3163 return dev;
3062 3164
@@ -3065,16 +3167,18 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3065 3167
3066void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3168void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3067{ 3169{
3068 struct net *net = seq_file_net(seq); 3170 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3171 first_net_device(seq_file_net(seq)) :
3172 next_net_device((struct net_device *)v);
3173
3069 ++*pos; 3174 ++*pos;
3070 return v == SEQ_START_TOKEN ? 3175 return rcu_dereference(dev);
3071 first_net_device(net) : next_net_device((struct net_device *)v);
3072} 3176}
3073 3177
3074void dev_seq_stop(struct seq_file *seq, void *v) 3178void dev_seq_stop(struct seq_file *seq, void *v)
3075 __releases(dev_base_lock) 3179 __releases(RCU)
3076{ 3180{
3077 read_unlock(&dev_base_lock); 3181 rcu_read_unlock();
3078} 3182}
3079 3183
3080static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 3184static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
@@ -4283,12 +4387,12 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4283EXPORT_SYMBOL(dev_set_mac_address); 4387EXPORT_SYMBOL(dev_set_mac_address);
4284 4388
4285/* 4389/*
4286 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) 4390 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4287 */ 4391 */
4288static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) 4392static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4289{ 4393{
4290 int err; 4394 int err;
4291 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); 4395 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4292 4396
4293 if (!dev) 4397 if (!dev)
4294 return -ENODEV; 4398 return -ENODEV;
@@ -4520,9 +4624,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4520 case SIOCGIFINDEX: 4624 case SIOCGIFINDEX:
4521 case SIOCGIFTXQLEN: 4625 case SIOCGIFTXQLEN:
4522 dev_load(net, ifr.ifr_name); 4626 dev_load(net, ifr.ifr_name);
4523 read_lock(&dev_base_lock); 4627 rcu_read_lock();
4524 ret = dev_ifsioc_locked(net, &ifr, cmd); 4628 ret = dev_ifsioc_locked(net, &ifr, cmd);
4525 read_unlock(&dev_base_lock); 4629 rcu_read_unlock();
4526 if (!ret) { 4630 if (!ret) {
4527 if (colon) 4631 if (colon)
4528 *colon = ':'; 4632 *colon = ':';
@@ -4723,6 +4827,10 @@ static void rollback_registered_many(struct list_head *head)
4723 netdev_unregister_kobject(dev); 4827 netdev_unregister_kobject(dev);
4724 } 4828 }
4725 4829
4830 /* Process any work delayed until the end of the batch */
4831 dev = list_entry(head->next, struct net_device, unreg_list);
4832 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4833
4726 synchronize_net(); 4834 synchronize_net();
4727 4835
4728 list_for_each_entry(dev, head, unreg_list) 4836 list_for_each_entry(dev, head, unreg_list)
@@ -4811,8 +4919,6 @@ EXPORT_SYMBOL(netdev_fix_features);
4811 4919
4812int register_netdevice(struct net_device *dev) 4920int register_netdevice(struct net_device *dev)
4813{ 4921{
4814 struct hlist_head *head;
4815 struct hlist_node *p;
4816 int ret; 4922 int ret;
4817 struct net *net = dev_net(dev); 4923 struct net *net = dev_net(dev);
4818 4924
@@ -4841,26 +4947,14 @@ int register_netdevice(struct net_device *dev)
4841 } 4947 }
4842 } 4948 }
4843 4949
4844 if (!dev_valid_name(dev->name)) { 4950 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4845 ret = -EINVAL; 4951 if (ret)
4846 goto err_uninit; 4952 goto err_uninit;
4847 }
4848 4953
4849 dev->ifindex = dev_new_index(net); 4954 dev->ifindex = dev_new_index(net);
4850 if (dev->iflink == -1) 4955 if (dev->iflink == -1)
4851 dev->iflink = dev->ifindex; 4956 dev->iflink = dev->ifindex;
4852 4957
4853 /* Check for existence of name */
4854 head = dev_name_hash(net, dev->name);
4855 hlist_for_each(p, head) {
4856 struct net_device *d
4857 = hlist_entry(p, struct net_device, name_hlist);
4858 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4859 ret = -EEXIST;
4860 goto err_uninit;
4861 }
4862 }
4863
4864 /* Fix illegal checksum combinations */ 4958 /* Fix illegal checksum combinations */
4865 if ((dev->features & NETIF_F_HW_CSUM) && 4959 if ((dev->features & NETIF_F_HW_CSUM) &&
4866 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 4960 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
@@ -5013,6 +5107,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5013{ 5107{
5014 unsigned long rebroadcast_time, warning_time; 5108 unsigned long rebroadcast_time, warning_time;
5015 5109
5110 linkwatch_forget_dev(dev);
5111
5016 rebroadcast_time = warning_time = jiffies; 5112 rebroadcast_time = warning_time = jiffies;
5017 while (atomic_read(&dev->refcnt) != 0) { 5113 while (atomic_read(&dev->refcnt) != 0) {
5018 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 5114 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
@@ -5020,6 +5116,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5020 5116
5021 /* Rebroadcast unregister notification */ 5117 /* Rebroadcast unregister notification */
5022 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5118 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5119 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5120 * should have already handle it the first time */
5023 5121
5024 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5122 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5025 &dev->state)) { 5123 &dev->state)) {
@@ -5115,6 +5213,32 @@ void netdev_run_todo(void)
5115} 5213}
5116 5214
5117/** 5215/**
5216 * dev_txq_stats_fold - fold tx_queues stats
5217 * @dev: device to get statistics from
5218 * @stats: struct net_device_stats to hold results
5219 */
5220void dev_txq_stats_fold(const struct net_device *dev,
5221 struct net_device_stats *stats)
5222{
5223 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5224 unsigned int i;
5225 struct netdev_queue *txq;
5226
5227 for (i = 0; i < dev->num_tx_queues; i++) {
5228 txq = netdev_get_tx_queue(dev, i);
5229 tx_bytes += txq->tx_bytes;
5230 tx_packets += txq->tx_packets;
5231 tx_dropped += txq->tx_dropped;
5232 }
5233 if (tx_bytes || tx_packets || tx_dropped) {
5234 stats->tx_bytes = tx_bytes;
5235 stats->tx_packets = tx_packets;
5236 stats->tx_dropped = tx_dropped;
5237 }
5238}
5239EXPORT_SYMBOL(dev_txq_stats_fold);
5240
5241/**
5118 * dev_get_stats - get network device statistics 5242 * dev_get_stats - get network device statistics
5119 * @dev: device to get statistics from 5243 * @dev: device to get statistics from
5120 * 5244 *
@@ -5128,25 +5252,9 @@ const struct net_device_stats *dev_get_stats(struct net_device *dev)
5128 5252
5129 if (ops->ndo_get_stats) 5253 if (ops->ndo_get_stats)
5130 return ops->ndo_get_stats(dev); 5254 return ops->ndo_get_stats(dev);
5131 else { 5255
5132 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; 5256 dev_txq_stats_fold(dev, &dev->stats);
5133 struct net_device_stats *stats = &dev->stats; 5257 return &dev->stats;
5134 unsigned int i;
5135 struct netdev_queue *txq;
5136
5137 for (i = 0; i < dev->num_tx_queues; i++) {
5138 txq = netdev_get_tx_queue(dev, i);
5139 tx_bytes += txq->tx_bytes;
5140 tx_packets += txq->tx_packets;
5141 tx_dropped += txq->tx_dropped;
5142 }
5143 if (tx_bytes || tx_packets || tx_dropped) {
5144 stats->tx_bytes = tx_bytes;
5145 stats->tx_packets = tx_packets;
5146 stats->tx_dropped = tx_dropped;
5147 }
5148 return stats;
5149 }
5150} 5258}
5151EXPORT_SYMBOL(dev_get_stats); 5259EXPORT_SYMBOL(dev_get_stats);
5152 5260
@@ -5226,6 +5334,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5226 netdev_init_queues(dev); 5334 netdev_init_queues(dev);
5227 5335
5228 INIT_LIST_HEAD(&dev->napi_list); 5336 INIT_LIST_HEAD(&dev->napi_list);
5337 INIT_LIST_HEAD(&dev->unreg_list);
5338 INIT_LIST_HEAD(&dev->link_watch_list);
5229 dev->priv_flags = IFF_XMIT_DST_RELEASE; 5339 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5230 setup(dev); 5340 setup(dev);
5231 strcpy(dev->name, name); 5341 strcpy(dev->name, name);
@@ -5293,7 +5403,7 @@ EXPORT_SYMBOL(synchronize_net);
5293 * unregister_netdevice_queue - remove device from the kernel 5403 * unregister_netdevice_queue - remove device from the kernel
5294 * @dev: device 5404 * @dev: device
5295 * @head: list 5405 * @head: list
5296 5406 *
5297 * This function shuts down a device interface and removes it 5407 * This function shuts down a device interface and removes it
5298 * from the kernel tables. 5408 * from the kernel tables.
5299 * If head not NULL, device is queued to be unregistered later. 5409 * If head not NULL, device is queued to be unregistered later.
@@ -5307,7 +5417,7 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5307 ASSERT_RTNL(); 5417 ASSERT_RTNL();
5308 5418
5309 if (head) { 5419 if (head) {
5310 list_add_tail(&dev->unreg_list, head); 5420 list_move_tail(&dev->unreg_list, head);
5311 } else { 5421 } else {
5312 rollback_registered(dev); 5422 rollback_registered(dev);
5313 /* Finish processing unregister after unlock */ 5423 /* Finish processing unregister after unlock */
@@ -5319,7 +5429,6 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
5319/** 5429/**
5320 * unregister_netdevice_many - unregister many devices 5430 * unregister_netdevice_many - unregister many devices
5321 * @head: list of devices 5431 * @head: list of devices
5322 *
5323 */ 5432 */
5324void unregister_netdevice_many(struct list_head *head) 5433void unregister_netdevice_many(struct list_head *head)
5325{ 5434{
@@ -5368,8 +5477,6 @@ EXPORT_SYMBOL(unregister_netdev);
5368 5477
5369int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 5478int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5370{ 5479{
5371 char buf[IFNAMSIZ];
5372 const char *destname;
5373 int err; 5480 int err;
5374 5481
5375 ASSERT_RTNL(); 5482 ASSERT_RTNL();
@@ -5402,20 +5509,11 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5402 * we can use it in the destination network namespace. 5509 * we can use it in the destination network namespace.
5403 */ 5510 */
5404 err = -EEXIST; 5511 err = -EEXIST;
5405 destname = dev->name; 5512 if (__dev_get_by_name(net, dev->name)) {
5406 if (__dev_get_by_name(net, destname)) {
5407 /* We get here if we can't use the current device name */ 5513 /* We get here if we can't use the current device name */
5408 if (!pat) 5514 if (!pat)
5409 goto out; 5515 goto out;
5410 if (!dev_valid_name(pat)) 5516 if (dev_get_valid_name(net, pat, dev->name, 1))
5411 goto out;
5412 if (strchr(pat, '%')) {
5413 if (__dev_alloc_name(net, pat, buf) < 0)
5414 goto out;
5415 destname = buf;
5416 } else
5417 destname = pat;
5418 if (__dev_get_by_name(net, destname))
5419 goto out; 5517 goto out;
5420 } 5518 }
5421 5519
@@ -5439,6 +5537,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5439 this device. They should clean all the things. 5537 this device. They should clean all the things.
5440 */ 5538 */
5441 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5539 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5540 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5442 5541
5443 /* 5542 /*
5444 * Flush the unicast and multicast chains 5543 * Flush the unicast and multicast chains
@@ -5451,10 +5550,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5451 /* Actually switch the network namespace */ 5550 /* Actually switch the network namespace */
5452 dev_net_set(dev, net); 5551 dev_net_set(dev, net);
5453 5552
5454 /* Assign the new device name */
5455 if (destname != dev->name)
5456 strcpy(dev->name, destname);
5457
5458 /* If there is an ifindex conflict assign a new one */ 5553 /* If there is an ifindex conflict assign a new one */
5459 if (__dev_get_by_index(net, dev->ifindex)) { 5554 if (__dev_get_by_index(net, dev->ifindex)) {
5460 int iflink = (dev->iflink == dev->ifindex); 5555 int iflink = (dev->iflink == dev->ifindex);
@@ -5641,14 +5736,13 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
5641 5736
5642static void __net_exit default_device_exit(struct net *net) 5737static void __net_exit default_device_exit(struct net *net)
5643{ 5738{
5644 struct net_device *dev; 5739 struct net_device *dev, *aux;
5645 /* 5740 /*
5646 * Push all migratable of the network devices back to the 5741 * Push all migratable network devices back to the
5647 * initial network namespace 5742 * initial network namespace
5648 */ 5743 */
5649 rtnl_lock(); 5744 rtnl_lock();
5650restart: 5745 for_each_netdev_safe(net, dev, aux) {
5651 for_each_netdev(net, dev) {
5652 int err; 5746 int err;
5653 char fb_name[IFNAMSIZ]; 5747 char fb_name[IFNAMSIZ];
5654 5748
@@ -5656,11 +5750,9 @@ restart:
5656 if (dev->features & NETIF_F_NETNS_LOCAL) 5750 if (dev->features & NETIF_F_NETNS_LOCAL)
5657 continue; 5751 continue;
5658 5752
5659 /* Delete virtual devices */ 5753 /* Leave virtual devices for the generic cleanup */
5660 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) { 5754 if (dev->rtnl_link_ops)
5661 dev->rtnl_link_ops->dellink(dev, NULL); 5755 continue;
5662 goto restart;
5663 }
5664 5756
5665 /* Push remaing network devices to init_net */ 5757 /* Push remaing network devices to init_net */
5666 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 5758 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
@@ -5670,13 +5762,37 @@ restart:
5670 __func__, dev->name, err); 5762 __func__, dev->name, err);
5671 BUG(); 5763 BUG();
5672 } 5764 }
5673 goto restart;
5674 } 5765 }
5675 rtnl_unlock(); 5766 rtnl_unlock();
5676} 5767}
5677 5768
5769static void __net_exit default_device_exit_batch(struct list_head *net_list)
5770{
5771 /* At exit all network devices most be removed from a network
5772 * namespace. Do this in the reverse order of registeration.
5773 * Do this across as many network namespaces as possible to
5774 * improve batching efficiency.
5775 */
5776 struct net_device *dev;
5777 struct net *net;
5778 LIST_HEAD(dev_kill_list);
5779
5780 rtnl_lock();
5781 list_for_each_entry(net, net_list, exit_list) {
5782 for_each_netdev_reverse(net, dev) {
5783 if (dev->rtnl_link_ops)
5784 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5785 else
5786 unregister_netdevice_queue(dev, &dev_kill_list);
5787 }
5788 }
5789 unregister_netdevice_many(&dev_kill_list);
5790 rtnl_unlock();
5791}
5792
5678static struct pernet_operations __net_initdata default_device_ops = { 5793static struct pernet_operations __net_initdata default_device_ops = {
5679 .exit = default_device_exit, 5794 .exit = default_device_exit,
5795 .exit_batch = default_device_exit_batch,
5680}; 5796};
5681 5797
5682/* 5798/*
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 0a113f26bc9f..b8e9d3a86887 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -41,7 +41,7 @@ static void send_dm_alert(struct work_struct *unused);
41 * netlink alerts 41 * netlink alerts
42 */ 42 */
43static int trace_state = TRACE_OFF; 43static int trace_state = TRACE_OFF;
44static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED; 44static DEFINE_SPINLOCK(trace_state_lock);
45 45
46struct per_cpu_dm_data { 46struct per_cpu_dm_data {
47 struct work_struct dm_alert_work; 47 struct work_struct dm_alert_work;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bd309384f8b8..02a3b2c69c1e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -72,7 +72,7 @@ static void flush_route_cache(struct fib_rules_ops *ops)
72 ops->flush_cache(ops); 72 ops->flush_cache(ops);
73} 73}
74 74
75int fib_rules_register(struct fib_rules_ops *ops) 75static int __fib_rules_register(struct fib_rules_ops *ops)
76{ 76{
77 int err = -EEXIST; 77 int err = -EEXIST;
78 struct fib_rules_ops *o; 78 struct fib_rules_ops *o;
@@ -102,6 +102,28 @@ errout:
102 return err; 102 return err;
103} 103}
104 104
105struct fib_rules_ops *
106fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
107{
108 struct fib_rules_ops *ops;
109 int err;
110
111 ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL);
112 if (ops == NULL)
113 return ERR_PTR(-ENOMEM);
114
115 INIT_LIST_HEAD(&ops->rules_list);
116 ops->fro_net = net;
117
118 err = __fib_rules_register(ops);
119 if (err) {
120 kfree(ops);
121 ops = ERR_PTR(err);
122 }
123
124 return ops;
125}
126
105EXPORT_SYMBOL_GPL(fib_rules_register); 127EXPORT_SYMBOL_GPL(fib_rules_register);
106 128
107void fib_rules_cleanup_ops(struct fib_rules_ops *ops) 129void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
@@ -115,6 +137,15 @@ void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
115} 137}
116EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops); 138EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
117 139
140static void fib_rules_put_rcu(struct rcu_head *head)
141{
142 struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
143 struct net *net = ops->fro_net;
144
145 release_net(net);
146 kfree(ops);
147}
148
118void fib_rules_unregister(struct fib_rules_ops *ops) 149void fib_rules_unregister(struct fib_rules_ops *ops)
119{ 150{
120 struct net *net = ops->fro_net; 151 struct net *net = ops->fro_net;
@@ -124,8 +155,7 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
124 fib_rules_cleanup_ops(ops); 155 fib_rules_cleanup_ops(ops);
125 spin_unlock(&net->rules_mod_lock); 156 spin_unlock(&net->rules_mod_lock);
126 157
127 synchronize_rcu(); 158 call_rcu(&ops->rcu, fib_rules_put_rcu);
128 release_net(net);
129} 159}
130 160
131EXPORT_SYMBOL_GPL(fib_rules_unregister); 161EXPORT_SYMBOL_GPL(fib_rules_unregister);
@@ -135,7 +165,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
135{ 165{
136 int ret = 0; 166 int ret = 0;
137 167
138 if (rule->ifindex && (rule->ifindex != fl->iif)) 168 if (rule->iifindex && (rule->iifindex != fl->iif))
169 goto out;
170
171 if (rule->oifindex && (rule->oifindex != fl->oif))
139 goto out; 172 goto out;
140 173
141 if ((rule->mark ^ fl->mark) & rule->mark_mask) 174 if ((rule->mark ^ fl->mark) & rule->mark_mask)
@@ -248,14 +281,24 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
248 if (tb[FRA_PRIORITY]) 281 if (tb[FRA_PRIORITY])
249 rule->pref = nla_get_u32(tb[FRA_PRIORITY]); 282 rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
250 283
251 if (tb[FRA_IFNAME]) { 284 if (tb[FRA_IIFNAME]) {
285 struct net_device *dev;
286
287 rule->iifindex = -1;
288 nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
289 dev = __dev_get_by_name(net, rule->iifname);
290 if (dev)
291 rule->iifindex = dev->ifindex;
292 }
293
294 if (tb[FRA_OIFNAME]) {
252 struct net_device *dev; 295 struct net_device *dev;
253 296
254 rule->ifindex = -1; 297 rule->oifindex = -1;
255 nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ); 298 nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
256 dev = __dev_get_by_name(net, rule->ifname); 299 dev = __dev_get_by_name(net, rule->oifname);
257 if (dev) 300 if (dev)
258 rule->ifindex = dev->ifindex; 301 rule->oifindex = dev->ifindex;
259 } 302 }
260 303
261 if (tb[FRA_FWMARK]) { 304 if (tb[FRA_FWMARK]) {
@@ -274,7 +317,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
274 rule->flags = frh->flags; 317 rule->flags = frh->flags;
275 rule->table = frh_get_table(frh, tb); 318 rule->table = frh_get_table(frh, tb);
276 319
277 if (!rule->pref && ops->default_pref) 320 if (!tb[FRA_PRIORITY] && ops->default_pref)
278 rule->pref = ops->default_pref(ops); 321 rule->pref = ops->default_pref(ops);
279 322
280 err = -EINVAL; 323 err = -EINVAL;
@@ -388,8 +431,12 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
388 (rule->pref != nla_get_u32(tb[FRA_PRIORITY]))) 431 (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
389 continue; 432 continue;
390 433
391 if (tb[FRA_IFNAME] && 434 if (tb[FRA_IIFNAME] &&
392 nla_strcmp(tb[FRA_IFNAME], rule->ifname)) 435 nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
436 continue;
437
438 if (tb[FRA_OIFNAME] &&
439 nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
393 continue; 440 continue;
394 441
395 if (tb[FRA_FWMARK] && 442 if (tb[FRA_FWMARK] &&
@@ -447,7 +494,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
447 struct fib_rule *rule) 494 struct fib_rule *rule)
448{ 495{
449 size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)) 496 size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
450 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */ 497 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
498 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
451 + nla_total_size(4) /* FRA_PRIORITY */ 499 + nla_total_size(4) /* FRA_PRIORITY */
452 + nla_total_size(4) /* FRA_TABLE */ 500 + nla_total_size(4) /* FRA_TABLE */
453 + nla_total_size(4) /* FRA_FWMARK */ 501 + nla_total_size(4) /* FRA_FWMARK */
@@ -481,11 +529,18 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
481 if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL) 529 if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
482 frh->flags |= FIB_RULE_UNRESOLVED; 530 frh->flags |= FIB_RULE_UNRESOLVED;
483 531
484 if (rule->ifname[0]) { 532 if (rule->iifname[0]) {
485 NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname); 533 NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
486 534
487 if (rule->ifindex == -1) 535 if (rule->iifindex == -1)
488 frh->flags |= FIB_RULE_DEV_DETACHED; 536 frh->flags |= FIB_RULE_IIF_DETACHED;
537 }
538
539 if (rule->oifname[0]) {
540 NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
541
542 if (rule->oifindex == -1)
543 frh->flags |= FIB_RULE_OIF_DETACHED;
489 } 544 }
490 545
491 if (rule->pref) 546 if (rule->pref)
@@ -600,9 +655,12 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
600 struct fib_rule *rule; 655 struct fib_rule *rule;
601 656
602 list_for_each_entry(rule, rules, list) { 657 list_for_each_entry(rule, rules, list) {
603 if (rule->ifindex == -1 && 658 if (rule->iifindex == -1 &&
604 strcmp(dev->name, rule->ifname) == 0) 659 strcmp(dev->name, rule->iifname) == 0)
605 rule->ifindex = dev->ifindex; 660 rule->iifindex = dev->ifindex;
661 if (rule->oifindex == -1 &&
662 strcmp(dev->name, rule->oifname) == 0)
663 rule->oifindex = dev->ifindex;
606 } 664 }
607} 665}
608 666
@@ -610,9 +668,12 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
610{ 668{
611 struct fib_rule *rule; 669 struct fib_rule *rule;
612 670
613 list_for_each_entry(rule, rules, list) 671 list_for_each_entry(rule, rules, list) {
614 if (rule->ifindex == dev->ifindex) 672 if (rule->iifindex == dev->ifindex)
615 rule->ifindex = -1; 673 rule->iifindex = -1;
674 if (rule->oifindex == dev->ifindex)
675 rule->oifindex = -1;
676 }
616} 677}
617 678
618 679
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bf8f7af699d7..5910b555a54a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent;
35static void linkwatch_event(struct work_struct *dummy); 35static void linkwatch_event(struct work_struct *dummy);
36static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); 36static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
37 37
38static struct net_device *lweventlist; 38static LIST_HEAD(lweventlist);
39static DEFINE_SPINLOCK(lweventlist_lock); 39static DEFINE_SPINLOCK(lweventlist_lock);
40 40
41static unsigned char default_operstate(const struct net_device *dev) 41static unsigned char default_operstate(const struct net_device *dev)
@@ -89,8 +89,10 @@ static void linkwatch_add_event(struct net_device *dev)
89 unsigned long flags; 89 unsigned long flags;
90 90
91 spin_lock_irqsave(&lweventlist_lock, flags); 91 spin_lock_irqsave(&lweventlist_lock, flags);
92 dev->link_watch_next = lweventlist; 92 if (list_empty(&dev->link_watch_list)) {
93 lweventlist = dev; 93 list_add_tail(&dev->link_watch_list, &lweventlist);
94 dev_hold(dev);
95 }
94 spin_unlock_irqrestore(&lweventlist_lock, flags); 96 spin_unlock_irqrestore(&lweventlist_lock, flags);
95} 97}
96 98
@@ -133,9 +135,35 @@ static void linkwatch_schedule_work(int urgent)
133} 135}
134 136
135 137
138static void linkwatch_do_dev(struct net_device *dev)
139{
140 /*
141 * Make sure the above read is complete since it can be
142 * rewritten as soon as we clear the bit below.
143 */
144 smp_mb__before_clear_bit();
145
146 /* We are about to handle this device,
147 * so new events can be accepted
148 */
149 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
150
151 rfc2863_policy(dev);
152 if (dev->flags & IFF_UP) {
153 if (netif_carrier_ok(dev))
154 dev_activate(dev);
155 else
156 dev_deactivate(dev);
157
158 netdev_state_change(dev);
159 }
160 dev_put(dev);
161}
162
136static void __linkwatch_run_queue(int urgent_only) 163static void __linkwatch_run_queue(int urgent_only)
137{ 164{
138 struct net_device *next; 165 struct net_device *dev;
166 LIST_HEAD(wrk);
139 167
140 /* 168 /*
141 * Limit the number of linkwatch events to one 169 * Limit the number of linkwatch events to one
@@ -153,46 +181,40 @@ static void __linkwatch_run_queue(int urgent_only)
153 clear_bit(LW_URGENT, &linkwatch_flags); 181 clear_bit(LW_URGENT, &linkwatch_flags);
154 182
155 spin_lock_irq(&lweventlist_lock); 183 spin_lock_irq(&lweventlist_lock);
156 next = lweventlist; 184 list_splice_init(&lweventlist, &wrk);
157 lweventlist = NULL;
158 spin_unlock_irq(&lweventlist_lock);
159 185
160 while (next) { 186 while (!list_empty(&wrk)) {
161 struct net_device *dev = next;
162 187
163 next = dev->link_watch_next; 188 dev = list_first_entry(&wrk, struct net_device, link_watch_list);
189 list_del_init(&dev->link_watch_list);
164 190
165 if (urgent_only && !linkwatch_urgent_event(dev)) { 191 if (urgent_only && !linkwatch_urgent_event(dev)) {
166 linkwatch_add_event(dev); 192 list_add_tail(&dev->link_watch_list, &lweventlist);
167 continue; 193 continue;
168 } 194 }
169 195 spin_unlock_irq(&lweventlist_lock);
170 /* 196 linkwatch_do_dev(dev);
171 * Make sure the above read is complete since it can be 197 spin_lock_irq(&lweventlist_lock);
172 * rewritten as soon as we clear the bit below.
173 */
174 smp_mb__before_clear_bit();
175
176 /* We are about to handle this device,
177 * so new events can be accepted
178 */
179 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
180
181 rfc2863_policy(dev);
182 if (dev->flags & IFF_UP) {
183 if (netif_carrier_ok(dev))
184 dev_activate(dev);
185 else
186 dev_deactivate(dev);
187
188 netdev_state_change(dev);
189 }
190
191 dev_put(dev);
192 } 198 }
193 199
194 if (lweventlist) 200 if (!list_empty(&lweventlist))
195 linkwatch_schedule_work(0); 201 linkwatch_schedule_work(0);
202 spin_unlock_irq(&lweventlist_lock);
203}
204
205void linkwatch_forget_dev(struct net_device *dev)
206{
207 unsigned long flags;
208 int clean = 0;
209
210 spin_lock_irqsave(&lweventlist_lock, flags);
211 if (!list_empty(&dev->link_watch_list)) {
212 list_del_init(&dev->link_watch_list);
213 clean = 1;
214 }
215 spin_unlock_irqrestore(&lweventlist_lock, flags);
216 if (clean)
217 linkwatch_do_dev(dev);
196} 218}
197 219
198 220
@@ -216,8 +238,6 @@ void linkwatch_fire_event(struct net_device *dev)
216 bool urgent = linkwatch_urgent_event(dev); 238 bool urgent = linkwatch_urgent_event(dev);
217 239
218 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 240 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
219 dev_hold(dev);
220
221 linkwatch_add_event(dev); 241 linkwatch_add_event(dev);
222 } else if (!urgent) 242 } else if (!urgent)
223 return; 243 return;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e587e6819698..a08a35bf0a7b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2092,7 +2092,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2092 if (h > s_h) 2092 if (h > s_h)
2093 s_idx = 0; 2093 s_idx = 0;
2094 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { 2094 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2095 if (dev_net(n->dev) != net) 2095 if (!net_eq(dev_net(n->dev), net))
2096 continue; 2096 continue;
2097 if (idx < s_idx) 2097 if (idx < s_idx)
2098 goto next; 2098 goto next;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 89de182353b0..fbc1c7472c5e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -525,7 +525,7 @@ void netdev_unregister_kobject(struct net_device * net)
525 525
526 kobject_get(&dev->kobj); 526 kobject_get(&dev->kobj);
527 527
528 if (dev_net(net) != &init_net) 528 if (!net_eq(dev_net(net), &init_net))
529 return; 529 return;
530 530
531 device_del(dev); 531 device_del(dev);
@@ -544,8 +544,11 @@ int netdev_register_kobject(struct net_device *net)
544 dev_set_name(dev, "%s", net->name); 544 dev_set_name(dev, "%s", net->name);
545 545
546#ifdef CONFIG_SYSFS 546#ifdef CONFIG_SYSFS
547 *groups++ = &netstat_group; 547 /* Allow for a device specific group */
548 if (*groups)
549 groups++;
548 550
551 *groups++ = &netstat_group;
549#ifdef CONFIG_WIRELESS_EXT_SYSFS 552#ifdef CONFIG_WIRELESS_EXT_SYSFS
550 if (net->ieee80211_ptr) 553 if (net->ieee80211_ptr)
551 *groups++ = &wireless_group; 554 *groups++ = &wireless_group;
@@ -556,7 +559,7 @@ int netdev_register_kobject(struct net_device *net)
556#endif 559#endif
557#endif /* CONFIG_SYSFS */ 560#endif /* CONFIG_SYSFS */
558 561
559 if (dev_net(net) != &init_net) 562 if (!net_eq(dev_net(net), &init_net))
560 return 0; 563 return 0;
561 564
562 return device_add(dev); 565 return device_add(dev);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 1c1af2756f38..bd8c4712ea24 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,14 +27,64 @@ EXPORT_SYMBOL(init_net);
27 27
28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
29 29
30static int ops_init(const struct pernet_operations *ops, struct net *net)
31{
32 int err;
33 if (ops->id && ops->size) {
34 void *data = kzalloc(ops->size, GFP_KERNEL);
35 if (!data)
36 return -ENOMEM;
37
38 err = net_assign_generic(net, *ops->id, data);
39 if (err) {
40 kfree(data);
41 return err;
42 }
43 }
44 if (ops->init)
45 return ops->init(net);
46 return 0;
47}
48
49static void ops_free(const struct pernet_operations *ops, struct net *net)
50{
51 if (ops->id && ops->size) {
52 int id = *ops->id;
53 kfree(net_generic(net, id));
54 }
55}
56
57static void ops_exit_list(const struct pernet_operations *ops,
58 struct list_head *net_exit_list)
59{
60 struct net *net;
61 if (ops->exit) {
62 list_for_each_entry(net, net_exit_list, exit_list)
63 ops->exit(net);
64 }
65 if (ops->exit_batch)
66 ops->exit_batch(net_exit_list);
67}
68
69static void ops_free_list(const struct pernet_operations *ops,
70 struct list_head *net_exit_list)
71{
72 struct net *net;
73 if (ops->size && ops->id) {
74 list_for_each_entry(net, net_exit_list, exit_list)
75 ops_free(ops, net);
76 }
77}
78
30/* 79/*
31 * setup_net runs the initializers for the network namespace object. 80 * setup_net runs the initializers for the network namespace object.
32 */ 81 */
33static __net_init int setup_net(struct net *net) 82static __net_init int setup_net(struct net *net)
34{ 83{
35 /* Must be called with net_mutex held */ 84 /* Must be called with net_mutex held */
36 struct pernet_operations *ops; 85 const struct pernet_operations *ops, *saved_ops;
37 int error = 0; 86 int error = 0;
87 LIST_HEAD(net_exit_list);
38 88
39 atomic_set(&net->count, 1); 89 atomic_set(&net->count, 1);
40 90
@@ -43,11 +93,9 @@ static __net_init int setup_net(struct net *net)
43#endif 93#endif
44 94
45 list_for_each_entry(ops, &pernet_list, list) { 95 list_for_each_entry(ops, &pernet_list, list) {
46 if (ops->init) { 96 error = ops_init(ops, net);
47 error = ops->init(net); 97 if (error < 0)
48 if (error < 0) 98 goto out_undo;
49 goto out_undo;
50 }
51 } 99 }
52out: 100out:
53 return error; 101 return error;
@@ -56,10 +104,14 @@ out_undo:
56 /* Walk through the list backwards calling the exit functions 104 /* Walk through the list backwards calling the exit functions
57 * for the pernet modules whose init functions did not fail. 105 * for the pernet modules whose init functions did not fail.
58 */ 106 */
59 list_for_each_entry_continue_reverse(ops, &pernet_list, list) { 107 list_add(&net->exit_list, &net_exit_list);
60 if (ops->exit) 108 saved_ops = ops;
61 ops->exit(net); 109 list_for_each_entry_continue_reverse(ops, &pernet_list, list)
62 } 110 ops_exit_list(ops, &net_exit_list);
111
112 ops = saved_ops;
113 list_for_each_entry_continue_reverse(ops, &pernet_list, list)
114 ops_free_list(ops, &net_exit_list);
63 115
64 rcu_barrier(); 116 rcu_barrier();
65 goto out; 117 goto out;
@@ -147,18 +199,29 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
147 return net_create(); 199 return net_create();
148} 200}
149 201
202static DEFINE_SPINLOCK(cleanup_list_lock);
203static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
204
150static void cleanup_net(struct work_struct *work) 205static void cleanup_net(struct work_struct *work)
151{ 206{
152 struct pernet_operations *ops; 207 const struct pernet_operations *ops;
153 struct net *net; 208 struct net *net, *tmp;
209 LIST_HEAD(net_kill_list);
210 LIST_HEAD(net_exit_list);
154 211
155 net = container_of(work, struct net, work); 212 /* Atomically snapshot the list of namespaces to cleanup */
213 spin_lock_irq(&cleanup_list_lock);
214 list_replace_init(&cleanup_list, &net_kill_list);
215 spin_unlock_irq(&cleanup_list_lock);
156 216
157 mutex_lock(&net_mutex); 217 mutex_lock(&net_mutex);
158 218
159 /* Don't let anyone else find us. */ 219 /* Don't let anyone else find us. */
160 rtnl_lock(); 220 rtnl_lock();
161 list_del_rcu(&net->list); 221 list_for_each_entry(net, &net_kill_list, cleanup_list) {
222 list_del_rcu(&net->list);
223 list_add_tail(&net->exit_list, &net_exit_list);
224 }
162 rtnl_unlock(); 225 rtnl_unlock();
163 226
164 /* 227 /*
@@ -169,10 +232,12 @@ static void cleanup_net(struct work_struct *work)
169 synchronize_rcu(); 232 synchronize_rcu();
170 233
171 /* Run all of the network namespace exit methods */ 234 /* Run all of the network namespace exit methods */
172 list_for_each_entry_reverse(ops, &pernet_list, list) { 235 list_for_each_entry_reverse(ops, &pernet_list, list)
173 if (ops->exit) 236 ops_exit_list(ops, &net_exit_list);
174 ops->exit(net); 237
175 } 238 /* Free the net generic variables */
239 list_for_each_entry_reverse(ops, &pernet_list, list)
240 ops_free_list(ops, &net_exit_list);
176 241
177 mutex_unlock(&net_mutex); 242 mutex_unlock(&net_mutex);
178 243
@@ -182,14 +247,23 @@ static void cleanup_net(struct work_struct *work)
182 rcu_barrier(); 247 rcu_barrier();
183 248
184 /* Finally it is safe to free my network namespace structure */ 249 /* Finally it is safe to free my network namespace structure */
185 net_free(net); 250 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
251 list_del_init(&net->exit_list);
252 net_free(net);
253 }
186} 254}
255static DECLARE_WORK(net_cleanup_work, cleanup_net);
187 256
188void __put_net(struct net *net) 257void __put_net(struct net *net)
189{ 258{
190 /* Cleanup the network namespace in process context */ 259 /* Cleanup the network namespace in process context */
191 INIT_WORK(&net->work, cleanup_net); 260 unsigned long flags;
192 queue_work(netns_wq, &net->work); 261
262 spin_lock_irqsave(&cleanup_list_lock, flags);
263 list_add(&net->cleanup_list, &cleanup_list);
264 spin_unlock_irqrestore(&cleanup_list_lock, flags);
265
266 queue_work(netns_wq, &net_cleanup_work);
193} 267}
194EXPORT_SYMBOL_GPL(__put_net); 268EXPORT_SYMBOL_GPL(__put_net);
195 269
@@ -259,18 +333,20 @@ static int __init net_ns_init(void)
259pure_initcall(net_ns_init); 333pure_initcall(net_ns_init);
260 334
261#ifdef CONFIG_NET_NS 335#ifdef CONFIG_NET_NS
262static int register_pernet_operations(struct list_head *list, 336static int __register_pernet_operations(struct list_head *list,
263 struct pernet_operations *ops) 337 struct pernet_operations *ops)
264{ 338{
265 struct net *net, *undo_net; 339 struct net *net;
266 int error; 340 int error;
341 LIST_HEAD(net_exit_list);
267 342
268 list_add_tail(&ops->list, list); 343 list_add_tail(&ops->list, list);
269 if (ops->init) { 344 if (ops->init || (ops->id && ops->size)) {
270 for_each_net(net) { 345 for_each_net(net) {
271 error = ops->init(net); 346 error = ops_init(ops, net);
272 if (error) 347 if (error)
273 goto out_undo; 348 goto out_undo;
349 list_add_tail(&net->exit_list, &net_exit_list);
274 } 350 }
275 } 351 }
276 return 0; 352 return 0;
@@ -278,45 +354,82 @@ static int register_pernet_operations(struct list_head *list,
278out_undo: 354out_undo:
279 /* If I have an error cleanup all namespaces I initialized */ 355 /* If I have an error cleanup all namespaces I initialized */
280 list_del(&ops->list); 356 list_del(&ops->list);
281 if (ops->exit) { 357 ops_exit_list(ops, &net_exit_list);
282 for_each_net(undo_net) { 358 ops_free_list(ops, &net_exit_list);
283 if (undo_net == net)
284 goto undone;
285 ops->exit(undo_net);
286 }
287 }
288undone:
289 return error; 359 return error;
290} 360}
291 361
292static void unregister_pernet_operations(struct pernet_operations *ops) 362static void __unregister_pernet_operations(struct pernet_operations *ops)
293{ 363{
294 struct net *net; 364 struct net *net;
365 LIST_HEAD(net_exit_list);
295 366
296 list_del(&ops->list); 367 list_del(&ops->list);
297 if (ops->exit) 368 for_each_net(net)
298 for_each_net(net) 369 list_add_tail(&net->exit_list, &net_exit_list);
299 ops->exit(net); 370 ops_exit_list(ops, &net_exit_list);
371 ops_free_list(ops, &net_exit_list);
300} 372}
301 373
302#else 374#else
303 375
376static int __register_pernet_operations(struct list_head *list,
377 struct pernet_operations *ops)
378{
379 int err = 0;
380 err = ops_init(ops, &init_net);
381 if (err)
382 ops_free(ops, &init_net);
383 return err;
384
385}
386
387static void __unregister_pernet_operations(struct pernet_operations *ops)
388{
389 LIST_HEAD(net_exit_list);
390 list_add(&init_net.exit_list, &net_exit_list);
391 ops_exit_list(ops, &net_exit_list);
392 ops_free_list(ops, &net_exit_list);
393}
394
395#endif /* CONFIG_NET_NS */
396
397static DEFINE_IDA(net_generic_ids);
398
304static int register_pernet_operations(struct list_head *list, 399static int register_pernet_operations(struct list_head *list,
305 struct pernet_operations *ops) 400 struct pernet_operations *ops)
306{ 401{
307 if (ops->init == NULL) 402 int error;
308 return 0; 403
309 return ops->init(&init_net); 404 if (ops->id) {
405again:
406 error = ida_get_new_above(&net_generic_ids, 1, ops->id);
407 if (error < 0) {
408 if (error == -EAGAIN) {
409 ida_pre_get(&net_generic_ids, GFP_KERNEL);
410 goto again;
411 }
412 return error;
413 }
414 }
415 error = __register_pernet_operations(list, ops);
416 if (error) {
417 rcu_barrier();
418 if (ops->id)
419 ida_remove(&net_generic_ids, *ops->id);
420 }
421
422 return error;
310} 423}
311 424
312static void unregister_pernet_operations(struct pernet_operations *ops) 425static void unregister_pernet_operations(struct pernet_operations *ops)
313{ 426{
314 if (ops->exit) 427
315 ops->exit(&init_net); 428 __unregister_pernet_operations(ops);
429 rcu_barrier();
430 if (ops->id)
431 ida_remove(&net_generic_ids, *ops->id);
316} 432}
317#endif
318
319static DEFINE_IDA(net_generic_ids);
320 433
321/** 434/**
322 * register_pernet_subsys - register a network namespace subsystem 435 * register_pernet_subsys - register a network namespace subsystem
@@ -364,38 +477,6 @@ void unregister_pernet_subsys(struct pernet_operations *module)
364} 477}
365EXPORT_SYMBOL_GPL(unregister_pernet_subsys); 478EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
366 479
367int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
368{
369 int rv;
370
371 mutex_lock(&net_mutex);
372again:
373 rv = ida_get_new_above(&net_generic_ids, 1, id);
374 if (rv < 0) {
375 if (rv == -EAGAIN) {
376 ida_pre_get(&net_generic_ids, GFP_KERNEL);
377 goto again;
378 }
379 goto out;
380 }
381 rv = register_pernet_operations(first_device, ops);
382 if (rv < 0)
383 ida_remove(&net_generic_ids, *id);
384out:
385 mutex_unlock(&net_mutex);
386 return rv;
387}
388EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
389
390void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
391{
392 mutex_lock(&net_mutex);
393 unregister_pernet_operations(ops);
394 ida_remove(&net_generic_ids, id);
395 mutex_unlock(&net_mutex);
396}
397EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
398
399/** 480/**
400 * register_pernet_device - register a network namespace device 481 * register_pernet_device - register a network namespace device
401 * @ops: pernet operations structure for the subsystem 482 * @ops: pernet operations structure for the subsystem
@@ -427,30 +508,6 @@ int register_pernet_device(struct pernet_operations *ops)
427} 508}
428EXPORT_SYMBOL_GPL(register_pernet_device); 509EXPORT_SYMBOL_GPL(register_pernet_device);
429 510
430int register_pernet_gen_device(int *id, struct pernet_operations *ops)
431{
432 int error;
433 mutex_lock(&net_mutex);
434again:
435 error = ida_get_new_above(&net_generic_ids, 1, id);
436 if (error) {
437 if (error == -EAGAIN) {
438 ida_pre_get(&net_generic_ids, GFP_KERNEL);
439 goto again;
440 }
441 goto out;
442 }
443 error = register_pernet_operations(&pernet_list, ops);
444 if (error)
445 ida_remove(&net_generic_ids, *id);
446 else if (first_device == &pernet_list)
447 first_device = &ops->list;
448out:
449 mutex_unlock(&net_mutex);
450 return error;
451}
452EXPORT_SYMBOL_GPL(register_pernet_gen_device);
453
454/** 511/**
455 * unregister_pernet_device - unregister a network namespace netdevice 512 * unregister_pernet_device - unregister a network namespace netdevice
456 * @ops: pernet operations structure to manipulate 513 * @ops: pernet operations structure to manipulate
@@ -470,17 +527,6 @@ void unregister_pernet_device(struct pernet_operations *ops)
470} 527}
471EXPORT_SYMBOL_GPL(unregister_pernet_device); 528EXPORT_SYMBOL_GPL(unregister_pernet_device);
472 529
473void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
474{
475 mutex_lock(&net_mutex);
476 if (&ops->list == first_device)
477 first_device = first_device->next;
478 unregister_pernet_operations(ops);
479 ida_remove(&net_generic_ids, id);
480 mutex_unlock(&net_mutex);
481}
482EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);
483
484static void net_generic_release(struct rcu_head *rcu) 530static void net_generic_release(struct rcu_head *rcu)
485{ 531{
486 struct net_generic *ng; 532 struct net_generic *ng;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5ce017bf4afa..a23b45f08ec9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -340,6 +340,7 @@ struct pktgen_dev {
340 __u16 cur_udp_src; 340 __u16 cur_udp_src;
341 __u16 cur_queue_map; 341 __u16 cur_queue_map;
342 __u32 cur_pkt_size; 342 __u32 cur_pkt_size;
343 __u32 last_pkt_size;
343 344
344 __u8 hh[14]; 345 __u8 hh[14];
345 /* = { 346 /* = {
@@ -363,6 +364,7 @@ struct pktgen_dev {
363 * device name (not when the inject is 364 * device name (not when the inject is
364 * started as it used to do.) 365 * started as it used to do.)
365 */ 366 */
367 char odevname[32];
366 struct flow_state *flows; 368 struct flow_state *flows;
367 unsigned cflows; /* Concurrent flows (config) */ 369 unsigned cflows; /* Concurrent flows (config) */
368 unsigned lflow; /* Flow length (config) */ 370 unsigned lflow; /* Flow length (config) */
@@ -426,7 +428,7 @@ static const char version[] =
426static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 428static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
427static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 429static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
428static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 430static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
429 const char *ifname); 431 const char *ifname, bool exact);
430static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 432static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
431static void pktgen_run_all_threads(void); 433static void pktgen_run_all_threads(void);
432static void pktgen_reset_all_threads(void); 434static void pktgen_reset_all_threads(void);
@@ -528,7 +530,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
528 seq_printf(seq, 530 seq_printf(seq,
529 " frags: %d delay: %llu clone_skb: %d ifname: %s\n", 531 " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
530 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, 532 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
531 pkt_dev->clone_skb, pkt_dev->odev->name); 533 pkt_dev->clone_skb, pkt_dev->odevname);
532 534
533 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 535 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
534 pkt_dev->lflow); 536 pkt_dev->lflow);
@@ -1688,13 +1690,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1688 if_lock(t); 1690 if_lock(t);
1689 list_for_each_entry(pkt_dev, &t->if_list, list) 1691 list_for_each_entry(pkt_dev, &t->if_list, list)
1690 if (pkt_dev->running) 1692 if (pkt_dev->running)
1691 seq_printf(seq, "%s ", pkt_dev->odev->name); 1693 seq_printf(seq, "%s ", pkt_dev->odevname);
1692 1694
1693 seq_printf(seq, "\nStopped: "); 1695 seq_printf(seq, "\nStopped: ");
1694 1696
1695 list_for_each_entry(pkt_dev, &t->if_list, list) 1697 list_for_each_entry(pkt_dev, &t->if_list, list)
1696 if (!pkt_dev->running) 1698 if (!pkt_dev->running)
1697 seq_printf(seq, "%s ", pkt_dev->odev->name); 1699 seq_printf(seq, "%s ", pkt_dev->odevname);
1698 1700
1699 if (t->result[0]) 1701 if (t->result[0])
1700 seq_printf(seq, "\nResult: %s\n", t->result); 1702 seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1819,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
1817{ 1819{
1818 struct pktgen_thread *t; 1820 struct pktgen_thread *t;
1819 struct pktgen_dev *pkt_dev = NULL; 1821 struct pktgen_dev *pkt_dev = NULL;
1822 bool exact = (remove == FIND);
1820 1823
1821 list_for_each_entry(t, &pktgen_threads, th_list) { 1824 list_for_each_entry(t, &pktgen_threads, th_list) {
1822 pkt_dev = pktgen_find_dev(t, ifname); 1825 pkt_dev = pktgen_find_dev(t, ifname, exact);
1823 if (pkt_dev) { 1826 if (pkt_dev) {
1824 if (remove) { 1827 if (remove) {
1825 if_lock(t); 1828 if_lock(t);
@@ -1994,7 +1997,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1994 "queue_map_min (zero-based) (%d) exceeds valid range " 1997 "queue_map_min (zero-based) (%d) exceeds valid range "
1995 "[0 - %d] for (%d) queues on %s, resetting\n", 1998 "[0 - %d] for (%d) queues on %s, resetting\n",
1996 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, 1999 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
1997 pkt_dev->odev->name); 2000 pkt_dev->odevname);
1998 pkt_dev->queue_map_min = ntxq - 1; 2001 pkt_dev->queue_map_min = ntxq - 1;
1999 } 2002 }
2000 if (pkt_dev->queue_map_max >= ntxq) { 2003 if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2005,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2002 "queue_map_max (zero-based) (%d) exceeds valid range " 2005 "queue_map_max (zero-based) (%d) exceeds valid range "
2003 "[0 - %d] for (%d) queues on %s, resetting\n", 2006 "[0 - %d] for (%d) queues on %s, resetting\n",
2004 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, 2007 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2005 pkt_dev->odev->name); 2008 pkt_dev->odevname);
2006 pkt_dev->queue_map_max = ntxq - 1; 2009 pkt_dev->queue_map_max = ntxq - 1;
2007 } 2010 }
2008 2011
@@ -2049,9 +2052,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2049 read_lock_bh(&idev->lock); 2052 read_lock_bh(&idev->lock);
2050 for (ifp = idev->addr_list; ifp; 2053 for (ifp = idev->addr_list; ifp;
2051 ifp = ifp->if_next) { 2054 ifp = ifp->if_next) {
2052 if (ifp->scope == IFA_LINK 2055 if (ifp->scope == IFA_LINK &&
2053 && !(ifp-> 2056 !(ifp->flags & IFA_F_TENTATIVE)) {
2054 flags & IFA_F_TENTATIVE)) {
2055 ipv6_addr_copy(&pkt_dev-> 2057 ipv6_addr_copy(&pkt_dev->
2056 cur_in6_saddr, 2058 cur_in6_saddr,
2057 &ifp->addr); 2059 &ifp->addr);
@@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3262 3264
3263 if (!pkt_dev->running) { 3265 if (!pkt_dev->running) {
3264 printk(KERN_WARNING "pktgen: interface: %s is already " 3266 printk(KERN_WARNING "pktgen: interface: %s is already "
3265 "stopped\n", pkt_dev->odev->name); 3267 "stopped\n", pkt_dev->odevname);
3266 return -EINVAL; 3268 return -EINVAL;
3267 } 3269 }
3268 3270
@@ -3434,7 +3436,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3434 pkt_dev->clone_count--; /* back out increment, OOM */ 3436 pkt_dev->clone_count--; /* back out increment, OOM */
3435 return; 3437 return;
3436 } 3438 }
3437 3439 pkt_dev->last_pkt_size = pkt_dev->skb->len;
3438 pkt_dev->allocated_skbs++; 3440 pkt_dev->allocated_skbs++;
3439 pkt_dev->clone_count = 0; /* reset counter */ 3441 pkt_dev->clone_count = 0; /* reset counter */
3440 } 3442 }
@@ -3461,12 +3463,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3461 pkt_dev->last_ok = 1; 3463 pkt_dev->last_ok = 1;
3462 pkt_dev->sofar++; 3464 pkt_dev->sofar++;
3463 pkt_dev->seq_num++; 3465 pkt_dev->seq_num++;
3464 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 3466 pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3465 break; 3467 break;
3466 default: /* Drivers are not supposed to return other values! */ 3468 default: /* Drivers are not supposed to return other values! */
3467 if (net_ratelimit()) 3469 if (net_ratelimit())
3468 pr_info("pktgen: %s xmit error: %d\n", 3470 pr_info("pktgen: %s xmit error: %d\n",
3469 odev->name, ret); 3471 pkt_dev->odevname, ret);
3470 pkt_dev->errors++; 3472 pkt_dev->errors++;
3471 /* fallthru */ 3473 /* fallthru */
3472 case NETDEV_TX_LOCKED: 3474 case NETDEV_TX_LOCKED:
@@ -3569,13 +3571,18 @@ static int pktgen_thread_worker(void *arg)
3569} 3571}
3570 3572
3571static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3573static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3572 const char *ifname) 3574 const char *ifname, bool exact)
3573{ 3575{
3574 struct pktgen_dev *p, *pkt_dev = NULL; 3576 struct pktgen_dev *p, *pkt_dev = NULL;
3575 if_lock(t); 3577 size_t len = strlen(ifname);
3576 3578
3579 if_lock(t);
3577 list_for_each_entry(p, &t->if_list, list) 3580 list_for_each_entry(p, &t->if_list, list)
3578 if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) { 3581 if (strncmp(p->odevname, ifname, len) == 0) {
3582 if (p->odevname[len]) {
3583 if (exact || p->odevname[len] != '@')
3584 continue;
3585 }
3579 pkt_dev = p; 3586 pkt_dev = p;
3580 break; 3587 break;
3581 } 3588 }
@@ -3618,6 +3625,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3618{ 3625{
3619 struct pktgen_dev *pkt_dev; 3626 struct pktgen_dev *pkt_dev;
3620 int err; 3627 int err;
3628 int node = cpu_to_node(t->cpu);
3621 3629
3622 /* We don't allow a device to be on several threads */ 3630 /* We don't allow a device to be on several threads */
3623 3631
@@ -3627,11 +3635,13 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3627 return -EBUSY; 3635 return -EBUSY;
3628 } 3636 }
3629 3637
3630 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); 3638 pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
3631 if (!pkt_dev) 3639 if (!pkt_dev)
3632 return -ENOMEM; 3640 return -ENOMEM;
3633 3641
3634 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state)); 3642 strcpy(pkt_dev->odevname, ifname);
3643 pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
3644 node);
3635 if (pkt_dev->flows == NULL) { 3645 if (pkt_dev->flows == NULL) {
3636 kfree(pkt_dev); 3646 kfree(pkt_dev);
3637 return -ENOMEM; 3647 return -ENOMEM;
@@ -3693,7 +3703,8 @@ static int __init pktgen_create_thread(int cpu)
3693 struct proc_dir_entry *pe; 3703 struct proc_dir_entry *pe;
3694 struct task_struct *p; 3704 struct task_struct *p;
3695 3705
3696 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); 3706 t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
3707 cpu_to_node(cpu));
3697 if (!t) { 3708 if (!t) {
3698 printk(KERN_ERR "pktgen: ERROR: out of memory, can't " 3709 printk(KERN_ERR "pktgen: ERROR: out of memory, can't "
3699 "create new thread.\n"); 3710 "create new thread.\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 391a62cd9df6..33148a568199 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -38,7 +38,6 @@
38 38
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/system.h> 40#include <asm/system.h>
41#include <asm/string.h>
42 41
43#include <linux/inet.h> 42#include <linux/inet.h>
44#include <linux/netdevice.h> 43#include <linux/netdevice.h>
@@ -53,8 +52,7 @@
53#include <net/rtnetlink.h> 52#include <net/rtnetlink.h>
54#include <net/net_namespace.h> 53#include <net/net_namespace.h>
55 54
56struct rtnl_link 55struct rtnl_link {
57{
58 rtnl_doit_func doit; 56 rtnl_doit_func doit;
59 rtnl_dumpit_func dumpit; 57 rtnl_dumpit_func dumpit;
60}; 58};
@@ -65,6 +63,7 @@ void rtnl_lock(void)
65{ 63{
66 mutex_lock(&rtnl_mutex); 64 mutex_lock(&rtnl_mutex);
67} 65}
66EXPORT_SYMBOL(rtnl_lock);
68 67
69void __rtnl_unlock(void) 68void __rtnl_unlock(void)
70{ 69{
@@ -76,16 +75,19 @@ void rtnl_unlock(void)
76 /* This fellow will unlock it for us. */ 75 /* This fellow will unlock it for us. */
77 netdev_run_todo(); 76 netdev_run_todo();
78} 77}
78EXPORT_SYMBOL(rtnl_unlock);
79 79
80int rtnl_trylock(void) 80int rtnl_trylock(void)
81{ 81{
82 return mutex_trylock(&rtnl_mutex); 82 return mutex_trylock(&rtnl_mutex);
83} 83}
84EXPORT_SYMBOL(rtnl_trylock);
84 85
85int rtnl_is_locked(void) 86int rtnl_is_locked(void)
86{ 87{
87 return mutex_is_locked(&rtnl_mutex); 88 return mutex_is_locked(&rtnl_mutex);
88} 89}
90EXPORT_SYMBOL(rtnl_is_locked);
89 91
90static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 92static struct rtnl_link *rtnl_msg_handlers[NPROTO];
91 93
@@ -168,7 +170,6 @@ int __rtnl_register(int protocol, int msgtype,
168 170
169 return 0; 171 return 0;
170} 172}
171
172EXPORT_SYMBOL_GPL(__rtnl_register); 173EXPORT_SYMBOL_GPL(__rtnl_register);
173 174
174/** 175/**
@@ -188,7 +189,6 @@ void rtnl_register(int protocol, int msgtype,
188 "protocol = %d, message type = %d\n", 189 "protocol = %d, message type = %d\n",
189 protocol, msgtype); 190 protocol, msgtype);
190} 191}
191
192EXPORT_SYMBOL_GPL(rtnl_register); 192EXPORT_SYMBOL_GPL(rtnl_register);
193 193
194/** 194/**
@@ -213,7 +213,6 @@ int rtnl_unregister(int protocol, int msgtype)
213 213
214 return 0; 214 return 0;
215} 215}
216
217EXPORT_SYMBOL_GPL(rtnl_unregister); 216EXPORT_SYMBOL_GPL(rtnl_unregister);
218 217
219/** 218/**
@@ -230,7 +229,6 @@ void rtnl_unregister_all(int protocol)
230 kfree(rtnl_msg_handlers[protocol]); 229 kfree(rtnl_msg_handlers[protocol]);
231 rtnl_msg_handlers[protocol] = NULL; 230 rtnl_msg_handlers[protocol] = NULL;
232} 231}
233
234EXPORT_SYMBOL_GPL(rtnl_unregister_all); 232EXPORT_SYMBOL_GPL(rtnl_unregister_all);
235 233
236static LIST_HEAD(link_ops); 234static LIST_HEAD(link_ops);
@@ -253,7 +251,6 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
253 list_add_tail(&ops->list, &link_ops); 251 list_add_tail(&ops->list, &link_ops);
254 return 0; 252 return 0;
255} 253}
256
257EXPORT_SYMBOL_GPL(__rtnl_link_register); 254EXPORT_SYMBOL_GPL(__rtnl_link_register);
258 255
259/** 256/**
@@ -271,7 +268,6 @@ int rtnl_link_register(struct rtnl_link_ops *ops)
271 rtnl_unlock(); 268 rtnl_unlock();
272 return err; 269 return err;
273} 270}
274
275EXPORT_SYMBOL_GPL(rtnl_link_register); 271EXPORT_SYMBOL_GPL(rtnl_link_register);
276 272
277static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) 273static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
@@ -309,7 +305,6 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
309 } 305 }
310 list_del(&ops->list); 306 list_del(&ops->list);
311} 307}
312
313EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 308EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
314 309
315/** 310/**
@@ -322,7 +317,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
322 __rtnl_link_unregister(ops); 317 __rtnl_link_unregister(ops);
323 rtnl_unlock(); 318 rtnl_unlock();
324} 319}
325
326EXPORT_SYMBOL_GPL(rtnl_link_unregister); 320EXPORT_SYMBOL_GPL(rtnl_link_unregister);
327 321
328static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) 322static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
@@ -427,12 +421,13 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
427 struct rtattr *rta; 421 struct rtattr *rta;
428 int size = RTA_LENGTH(attrlen); 422 int size = RTA_LENGTH(attrlen);
429 423
430 rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); 424 rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
431 rta->rta_type = attrtype; 425 rta->rta_type = attrtype;
432 rta->rta_len = size; 426 rta->rta_len = size;
433 memcpy(RTA_DATA(rta), data, attrlen); 427 memcpy(RTA_DATA(rta), data, attrlen);
434 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); 428 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
435} 429}
430EXPORT_SYMBOL(__rta_fill);
436 431
437int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) 432int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
438{ 433{
@@ -454,6 +449,7 @@ int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
454 449
455 return nlmsg_unicast(rtnl, skb, pid); 450 return nlmsg_unicast(rtnl, skb, pid);
456} 451}
452EXPORT_SYMBOL(rtnl_unicast);
457 453
458void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, 454void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
459 struct nlmsghdr *nlh, gfp_t flags) 455 struct nlmsghdr *nlh, gfp_t flags)
@@ -466,6 +462,7 @@ void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
466 462
467 nlmsg_notify(rtnl, skb, pid, group, report, flags); 463 nlmsg_notify(rtnl, skb, pid, group, report, flags);
468} 464}
465EXPORT_SYMBOL(rtnl_notify);
469 466
470void rtnl_set_sk_err(struct net *net, u32 group, int error) 467void rtnl_set_sk_err(struct net *net, u32 group, int error)
471{ 468{
@@ -473,6 +470,7 @@ void rtnl_set_sk_err(struct net *net, u32 group, int error)
473 470
474 netlink_set_err(rtnl, 0, group, error); 471 netlink_set_err(rtnl, 0, group, error);
475} 472}
473EXPORT_SYMBOL(rtnl_set_sk_err);
476 474
477int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) 475int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
478{ 476{
@@ -501,6 +499,7 @@ nla_put_failure:
501 nla_nest_cancel(skb, mx); 499 nla_nest_cancel(skb, mx);
502 return -EMSGSIZE; 500 return -EMSGSIZE;
503} 501}
502EXPORT_SYMBOL(rtnetlink_put_metrics);
504 503
505int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 504int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
506 u32 ts, u32 tsage, long expires, u32 error) 505 u32 ts, u32 tsage, long expires, u32 error)
@@ -520,14 +519,13 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
520 519
521 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); 520 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
522} 521}
523
524EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 522EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
525 523
526static void set_operstate(struct net_device *dev, unsigned char transition) 524static void set_operstate(struct net_device *dev, unsigned char transition)
527{ 525{
528 unsigned char operstate = dev->operstate; 526 unsigned char operstate = dev->operstate;
529 527
530 switch(transition) { 528 switch (transition) {
531 case IF_OPER_UP: 529 case IF_OPER_UP:
532 if ((operstate == IF_OPER_DORMANT || 530 if ((operstate == IF_OPER_DORMANT ||
533 operstate == IF_OPER_UNKNOWN) && 531 operstate == IF_OPER_UNKNOWN) &&
@@ -728,12 +726,27 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
728 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 726 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
729 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 727 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
730}; 728};
729EXPORT_SYMBOL(ifla_policy);
731 730
732static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 731static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
733 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 732 [IFLA_INFO_KIND] = { .type = NLA_STRING },
734 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 733 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
735}; 734};
736 735
736struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
737{
738 struct net *net;
739 /* Examine the link attributes and figure out which
740 * network namespace we are talking about.
741 */
742 if (tb[IFLA_NET_NS_PID])
743 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
744 else
745 net = get_net(src_net);
746 return net;
747}
748EXPORT_SYMBOL(rtnl_link_get_net);
749
737static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) 750static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
738{ 751{
739 if (dev) { 752 if (dev) {
@@ -757,8 +770,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
757 int err; 770 int err;
758 771
759 if (tb[IFLA_NET_NS_PID]) { 772 if (tb[IFLA_NET_NS_PID]) {
760 struct net *net; 773 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
761 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
762 if (IS_ERR(net)) { 774 if (IS_ERR(net)) {
763 err = PTR_ERR(net); 775 err = PTR_ERR(net);
764 goto errout; 776 goto errout;
@@ -932,7 +944,8 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
932 goto errout; 944 goto errout;
933 } 945 }
934 946
935 if ((err = validate_linkmsg(dev, tb)) < 0) 947 err = validate_linkmsg(dev, tb);
948 if (err < 0)
936 goto errout; 949 goto errout;
937 950
938 err = do_setlink(dev, ifm, tb, ifname, 0); 951 err = do_setlink(dev, ifm, tb, ifname, 0);
@@ -976,8 +989,8 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
976 return 0; 989 return 0;
977} 990}
978 991
979struct net_device *rtnl_create_link(struct net *net, char *ifname, 992struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
980 const struct rtnl_link_ops *ops, struct nlattr *tb[]) 993 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
981{ 994{
982 int err; 995 int err;
983 struct net_device *dev; 996 struct net_device *dev;
@@ -985,7 +998,8 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
985 unsigned int real_num_queues = 1; 998 unsigned int real_num_queues = 1;
986 999
987 if (ops->get_tx_queues) { 1000 if (ops->get_tx_queues) {
988 err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues); 1001 err = ops->get_tx_queues(src_net, tb, &num_queues,
1002 &real_num_queues);
989 if (err) 1003 if (err)
990 goto err; 1004 goto err;
991 } 1005 }
@@ -994,16 +1008,16 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
994 if (!dev) 1008 if (!dev)
995 goto err; 1009 goto err;
996 1010
1011 dev_net_set(dev, net);
1012 dev->rtnl_link_ops = ops;
997 dev->real_num_tx_queues = real_num_queues; 1013 dev->real_num_tx_queues = real_num_queues;
1014
998 if (strchr(dev->name, '%')) { 1015 if (strchr(dev->name, '%')) {
999 err = dev_alloc_name(dev, dev->name); 1016 err = dev_alloc_name(dev, dev->name);
1000 if (err < 0) 1017 if (err < 0)
1001 goto err_free; 1018 goto err_free;
1002 } 1019 }
1003 1020
1004 dev_net_set(dev, net);
1005 dev->rtnl_link_ops = ops;
1006
1007 if (tb[IFLA_MTU]) 1021 if (tb[IFLA_MTU])
1008 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1022 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1009 if (tb[IFLA_ADDRESS]) 1023 if (tb[IFLA_ADDRESS])
@@ -1026,6 +1040,7 @@ err_free:
1026err: 1040err:
1027 return ERR_PTR(err); 1041 return ERR_PTR(err);
1028} 1042}
1043EXPORT_SYMBOL(rtnl_create_link);
1029 1044
1030static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1045static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1031{ 1046{
@@ -1059,7 +1074,8 @@ replay:
1059 else 1074 else
1060 dev = NULL; 1075 dev = NULL;
1061 1076
1062 if ((err = validate_linkmsg(dev, tb)) < 0) 1077 err = validate_linkmsg(dev, tb);
1078 if (err < 0)
1063 return err; 1079 return err;
1064 1080
1065 if (tb[IFLA_LINKINFO]) { 1081 if (tb[IFLA_LINKINFO]) {
@@ -1080,6 +1096,7 @@ replay:
1080 1096
1081 if (1) { 1097 if (1) {
1082 struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; 1098 struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
1099 struct net *dest_net;
1083 1100
1084 if (ops) { 1101 if (ops) {
1085 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 1102 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
@@ -1144,17 +1161,19 @@ replay:
1144 if (!ifname[0]) 1161 if (!ifname[0])
1145 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 1162 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
1146 1163
1147 dev = rtnl_create_link(net, ifname, ops, tb); 1164 dest_net = rtnl_link_get_net(net, tb);
1165 dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
1148 1166
1149 if (IS_ERR(dev)) 1167 if (IS_ERR(dev))
1150 err = PTR_ERR(dev); 1168 err = PTR_ERR(dev);
1151 else if (ops->newlink) 1169 else if (ops->newlink)
1152 err = ops->newlink(dev, tb, data); 1170 err = ops->newlink(net, dev, tb, data);
1153 else 1171 else
1154 err = register_netdevice(dev); 1172 err = register_netdevice(dev);
1155
1156 if (err < 0 && !IS_ERR(dev)) 1173 if (err < 0 && !IS_ERR(dev))
1157 free_netdev(dev); 1174 free_netdev(dev);
1175
1176 put_net(dest_net);
1158 return err; 1177 return err;
1159 } 1178 }
1160} 1179}
@@ -1210,7 +1229,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1210 1229
1211 if (s_idx == 0) 1230 if (s_idx == 0)
1212 s_idx = 1; 1231 s_idx = 1;
1213 for (idx=1; idx<NPROTO; idx++) { 1232 for (idx = 1; idx < NPROTO; idx++) {
1214 int type = cb->nlh->nlmsg_type-RTM_BASE; 1233 int type = cb->nlh->nlmsg_type-RTM_BASE;
1215 if (idx < s_idx || idx == PF_PACKET) 1234 if (idx < s_idx || idx == PF_PACKET)
1216 continue; 1235 continue;
@@ -1277,7 +1296,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1277 if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) 1296 if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
1278 return 0; 1297 return 0;
1279 1298
1280 family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family; 1299 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
1281 if (family >= NPROTO) 1300 if (family >= NPROTO)
1282 return -EAFNOSUPPORT; 1301 return -EAFNOSUPPORT;
1283 1302
@@ -1310,7 +1329,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1310 1329
1311 if (nlh->nlmsg_len > min_len) { 1330 if (nlh->nlmsg_len > min_len) {
1312 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); 1331 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
1313 struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len); 1332 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
1314 1333
1315 while (RTA_OK(attr, attrlen)) { 1334 while (RTA_OK(attr, attrlen)) {
1316 unsigned flavor = attr->rta_type; 1335 unsigned flavor = attr->rta_type;
@@ -1416,14 +1435,3 @@ void __init rtnetlink_init(void)
1416 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); 1435 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
1417} 1436}
1418 1437
1419EXPORT_SYMBOL(__rta_fill);
1420EXPORT_SYMBOL(rtnetlink_put_metrics);
1421EXPORT_SYMBOL(rtnl_lock);
1422EXPORT_SYMBOL(rtnl_trylock);
1423EXPORT_SYMBOL(rtnl_unlock);
1424EXPORT_SYMBOL(rtnl_is_locked);
1425EXPORT_SYMBOL(rtnl_unicast);
1426EXPORT_SYMBOL(rtnl_notify);
1427EXPORT_SYMBOL(rtnl_set_sk_err);
1428EXPORT_SYMBOL(rtnl_create_link);
1429EXPORT_SYMBOL(ifla_policy);
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
deleted file mode 100644
index 79687dfd6957..000000000000
--- a/net/core/skb_dma_map.c
+++ /dev/null
@@ -1,65 +0,0 @@
1/* skb_dma_map.c: DMA mapping helpers for socket buffers.
2 *
3 * Copyright (C) David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/dma-mapping.h>
9#include <linux/skbuff.h>
10
11int skb_dma_map(struct device *dev, struct sk_buff *skb,
12 enum dma_data_direction dir)
13{
14 struct skb_shared_info *sp = skb_shinfo(skb);
15 dma_addr_t map;
16 int i;
17
18 map = dma_map_single(dev, skb->data,
19 skb_headlen(skb), dir);
20 if (dma_mapping_error(dev, map))
21 goto out_err;
22
23 sp->dma_head = map;
24 for (i = 0; i < sp->nr_frags; i++) {
25 skb_frag_t *fp = &sp->frags[i];
26
27 map = dma_map_page(dev, fp->page, fp->page_offset,
28 fp->size, dir);
29 if (dma_mapping_error(dev, map))
30 goto unwind;
31 sp->dma_maps[i] = map;
32 }
33
34 return 0;
35
36unwind:
37 while (--i >= 0) {
38 skb_frag_t *fp = &sp->frags[i];
39
40 dma_unmap_page(dev, sp->dma_maps[i],
41 fp->size, dir);
42 }
43 dma_unmap_single(dev, sp->dma_head,
44 skb_headlen(skb), dir);
45out_err:
46 return -ENOMEM;
47}
48EXPORT_SYMBOL(skb_dma_map);
49
50void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
51 enum dma_data_direction dir)
52{
53 struct skb_shared_info *sp = skb_shinfo(skb);
54 int i;
55
56 dma_unmap_single(dev, sp->dma_head,
57 skb_headlen(skb), dir);
58 for (i = 0; i < sp->nr_frags; i++) {
59 skb_frag_t *fp = &sp->frags[i];
60
61 dma_unmap_page(dev, sp->dma_maps[i],
62 fp->size, dir);
63 }
64}
65EXPORT_SYMBOL(skb_dma_unmap);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 80a96166df39..bfa3e7865a8c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -493,6 +493,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
493{ 493{
494 struct skb_shared_info *shinfo; 494 struct skb_shared_info *shinfo;
495 495
496 if (irqs_disabled())
497 return 0;
498
496 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 499 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
497 return 0; 500 return 0;
498 501
@@ -546,7 +549,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
546#endif 549#endif
547 new->protocol = old->protocol; 550 new->protocol = old->protocol;
548 new->mark = old->mark; 551 new->mark = old->mark;
549 new->iif = old->iif; 552 new->skb_iif = old->skb_iif;
550 __nf_copy(new, old); 553 __nf_copy(new, old);
551#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 554#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
552 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 555 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -2701,7 +2704,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2701 2704
2702 NAPI_GRO_CB(skb)->free = 1; 2705 NAPI_GRO_CB(skb)->free = 1;
2703 goto done; 2706 goto done;
2704 } 2707 } else if (skb_gro_len(p) != pinfo->gso_size)
2708 return -E2BIG;
2705 2709
2706 headroom = skb_headroom(p); 2710 headroom = skb_headroom(p);
2707 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); 2711 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
diff --git a/net/core/sock.c b/net/core/sock.c
index 5a51512f638a..76ff58d43e26 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -417,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
417 if (copy_from_user(devname, optval, optlen)) 417 if (copy_from_user(devname, optval, optlen))
418 goto out; 418 goto out;
419 419
420 if (devname[0] == '\0') { 420 index = 0;
421 index = 0; 421 if (devname[0] != '\0') {
422 } else { 422 struct net_device *dev;
423 struct net_device *dev = dev_get_by_name(net, devname); 423
424 424 rcu_read_lock();
425 dev = dev_get_by_name_rcu(net, devname);
426 if (dev)
427 index = dev->ifindex;
428 rcu_read_unlock();
425 ret = -ENODEV; 429 ret = -ENODEV;
426 if (!dev) 430 if (!dev)
427 goto out; 431 goto out;
428
429 index = dev->ifindex;
430 dev_put(dev);
431 } 432 }
432 433
433 lock_sock(sk); 434 lock_sock(sk);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7db1de0497c6..fcfc5458c399 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -134,7 +134,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
134 net->core.sysctl_somaxconn = SOMAXCONN; 134 net->core.sysctl_somaxconn = SOMAXCONN;
135 135
136 tbl = netns_core_table; 136 tbl = netns_core_table;
137 if (net != &init_net) { 137 if (!net_eq(net, &init_net)) {
138 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); 138 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
139 if (tbl == NULL) 139 if (tbl == NULL)
140 goto err_dup; 140 goto err_dup;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index ac1205df6c86..db9f5b39388f 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1085,8 +1085,8 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1085 u8 value_byte; 1085 u8 value_byte;
1086 u32 value_int; 1086 u32 value_int;
1087 1087
1088 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg 1088 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1089 || !netdev->dcbnl_ops->setbcnrp) 1089 !netdev->dcbnl_ops->setbcnrp)
1090 return ret; 1090 return ret;
1091 1091
1092 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, 1092 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
@@ -1126,7 +1126,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1126 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 1126 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1127 int ret = -EINVAL; 1127 int ret = -EINVAL;
1128 1128
1129 if (net != &init_net) 1129 if (!net_eq(net, &init_net))
1130 return -EINVAL; 1130 return -EINVAL;
1131 1131
1132 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, 1132 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00028d4b09d9..efbcfdc12796 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -477,7 +477,8 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
477 return &rt->u.dst; 477 return &rt->u.dst;
478} 478}
479 479
480static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) 480static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
481 struct request_values *rv_unused)
481{ 482{
482 int err = -1; 483 int err = -1;
483 struct sk_buff *skb; 484 struct sk_buff *skb;
@@ -626,7 +627,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
626 dreq->dreq_iss = dccp_v4_init_sequence(skb); 627 dreq->dreq_iss = dccp_v4_init_sequence(skb);
627 dreq->dreq_service = service; 628 dreq->dreq_service = service;
628 629
629 if (dccp_v4_send_response(sk, req)) 630 if (dccp_v4_send_response(sk, req, NULL))
630 goto drop_and_free; 631 goto drop_and_free;
631 632
632 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 633 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -991,7 +992,6 @@ static struct inet_protosw dccp_v4_protosw = {
991 .protocol = IPPROTO_DCCP, 992 .protocol = IPPROTO_DCCP,
992 .prot = &dccp_v4_prot, 993 .prot = &dccp_v4_prot,
993 .ops = &inet_dccp_ops, 994 .ops = &inet_dccp_ops,
994 .capability = -1,
995 .no_check = 0, 995 .no_check = 0,
996 .flags = INET_PROTOSW_ICSK, 996 .flags = INET_PROTOSW_ICSK,
997}; 997};
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6d89f9f7d5d8..6574215a1f51 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -241,7 +241,8 @@ out:
241} 241}
242 242
243 243
244static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) 244static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
245 struct request_values *rv_unused)
245{ 246{
246 struct inet6_request_sock *ireq6 = inet6_rsk(req); 247 struct inet6_request_sock *ireq6 = inet6_rsk(req);
247 struct ipv6_pinfo *np = inet6_sk(sk); 248 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -468,7 +469,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
468 dreq->dreq_iss = dccp_v6_init_sequence(skb); 469 dreq->dreq_iss = dccp_v6_init_sequence(skb);
469 dreq->dreq_service = service; 470 dreq->dreq_service = service;
470 471
471 if (dccp_v6_send_response(sk, req)) 472 if (dccp_v6_send_response(sk, req, NULL))
472 goto drop_and_free; 473 goto drop_and_free;
473 474
474 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 475 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -1185,7 +1186,6 @@ static struct inet_protosw dccp_v6_protosw = {
1185 .protocol = IPPROTO_DCCP, 1186 .protocol = IPPROTO_DCCP,
1186 .prot = &dccp_v6_prot, 1187 .prot = &dccp_v6_prot,
1187 .ops = &inet6_dccp_ops, 1188 .ops = &inet6_dccp_ops,
1188 .capability = -1,
1189 .flags = INET_PROTOSW_ICSK, 1189 .flags = INET_PROTOSW_ICSK,
1190}; 1190};
1191 1191
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5ca49cec95f5..af226a063141 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -184,7 +184,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
184 * counter (backoff, monitored by dccp_response_timer). 184 * counter (backoff, monitored by dccp_response_timer).
185 */ 185 */
186 req->retrans++; 186 req->retrans++;
187 req->rsk_ops->rtx_syn_ack(sk, req); 187 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
188 } 188 }
189 /* Network Duplicate, discard packet */ 189 /* Network Duplicate, discard packet */
190 return NULL; 190 return NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 664965c87e16..2b494fac9468 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -675,11 +675,12 @@ char *dn_addr2asc(__u16 addr, char *buf)
675 675
676 676
677 677
678static int dn_create(struct net *net, struct socket *sock, int protocol) 678static int dn_create(struct net *net, struct socket *sock, int protocol,
679 int kern)
679{ 680{
680 struct sock *sk; 681 struct sock *sk;
681 682
682 if (net != &init_net) 683 if (!net_eq(net, &init_net))
683 return -EAFNOSUPPORT; 684 return -EAFNOSUPPORT;
684 685
685 switch(sock->type) { 686 switch(sock->type) {
@@ -749,9 +750,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
749 750
750 if (!(saddr->sdn_flags & SDF_WILD)) { 751 if (!(saddr->sdn_flags & SDF_WILD)) {
751 if (le16_to_cpu(saddr->sdn_nodeaddrl)) { 752 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
752 read_lock(&dev_base_lock); 753 rcu_read_lock();
753 ldev = NULL; 754 ldev = NULL;
754 for_each_netdev(&init_net, dev) { 755 for_each_netdev_rcu(&init_net, dev) {
755 if (!dev->dn_ptr) 756 if (!dev->dn_ptr)
756 continue; 757 continue;
757 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { 758 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
@@ -759,7 +760,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
759 break; 760 break;
760 } 761 }
761 } 762 }
762 read_unlock(&dev_base_lock); 763 rcu_read_unlock();
763 if (ldev == NULL) 764 if (ldev == NULL)
764 return -EADDRNOTAVAIL; 765 return -EADDRNOTAVAIL;
765 } 766 }
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 6e1f085db06a..f20dec9cfa06 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -68,7 +68,7 @@ extern struct neigh_table dn_neigh_table;
68 */ 68 */
69__le16 decnet_address = 0; 69__le16 decnet_address = 0;
70 70
71static DEFINE_RWLOCK(dndev_lock); 71static DEFINE_SPINLOCK(dndev_lock);
72static struct net_device *decnet_default_device; 72static struct net_device *decnet_default_device;
73static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); 73static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
74 74
@@ -557,7 +557,8 @@ rarok:
557struct net_device *dn_dev_get_default(void) 557struct net_device *dn_dev_get_default(void)
558{ 558{
559 struct net_device *dev; 559 struct net_device *dev;
560 read_lock(&dndev_lock); 560
561 spin_lock(&dndev_lock);
561 dev = decnet_default_device; 562 dev = decnet_default_device;
562 if (dev) { 563 if (dev) {
563 if (dev->dn_ptr) 564 if (dev->dn_ptr)
@@ -565,7 +566,8 @@ struct net_device *dn_dev_get_default(void)
565 else 566 else
566 dev = NULL; 567 dev = NULL;
567 } 568 }
568 read_unlock(&dndev_lock); 569 spin_unlock(&dndev_lock);
570
569 return dev; 571 return dev;
570} 572}
571 573
@@ -575,13 +577,15 @@ int dn_dev_set_default(struct net_device *dev, int force)
575 int rv = -EBUSY; 577 int rv = -EBUSY;
576 if (!dev->dn_ptr) 578 if (!dev->dn_ptr)
577 return -ENODEV; 579 return -ENODEV;
578 write_lock(&dndev_lock); 580
581 spin_lock(&dndev_lock);
579 if (force || decnet_default_device == NULL) { 582 if (force || decnet_default_device == NULL) {
580 old = decnet_default_device; 583 old = decnet_default_device;
581 decnet_default_device = dev; 584 decnet_default_device = dev;
582 rv = 0; 585 rv = 0;
583 } 586 }
584 write_unlock(&dndev_lock); 587 spin_unlock(&dndev_lock);
588
585 if (old) 589 if (old)
586 dev_put(old); 590 dev_put(old);
587 return rv; 591 return rv;
@@ -589,26 +593,29 @@ int dn_dev_set_default(struct net_device *dev, int force)
589 593
590static void dn_dev_check_default(struct net_device *dev) 594static void dn_dev_check_default(struct net_device *dev)
591{ 595{
592 write_lock(&dndev_lock); 596 spin_lock(&dndev_lock);
593 if (dev == decnet_default_device) { 597 if (dev == decnet_default_device) {
594 decnet_default_device = NULL; 598 decnet_default_device = NULL;
595 } else { 599 } else {
596 dev = NULL; 600 dev = NULL;
597 } 601 }
598 write_unlock(&dndev_lock); 602 spin_unlock(&dndev_lock);
603
599 if (dev) 604 if (dev)
600 dev_put(dev); 605 dev_put(dev);
601} 606}
602 607
608/*
609 * Called with RTNL
610 */
603static struct dn_dev *dn_dev_by_index(int ifindex) 611static struct dn_dev *dn_dev_by_index(int ifindex)
604{ 612{
605 struct net_device *dev; 613 struct net_device *dev;
606 struct dn_dev *dn_dev = NULL; 614 struct dn_dev *dn_dev = NULL;
607 dev = dev_get_by_index(&init_net, ifindex); 615
608 if (dev) { 616 dev = __dev_get_by_index(&init_net, ifindex);
617 if (dev)
609 dn_dev = dev->dn_ptr; 618 dn_dev = dev->dn_ptr;
610 dev_put(dev);
611 }
612 619
613 return dn_dev; 620 return dn_dev;
614} 621}
@@ -629,7 +636,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
629 struct dn_ifaddr *ifa, **ifap; 636 struct dn_ifaddr *ifa, **ifap;
630 int err = -EINVAL; 637 int err = -EINVAL;
631 638
632 if (net != &init_net) 639 if (!net_eq(net, &init_net))
633 goto errout; 640 goto errout;
634 641
635 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); 642 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
@@ -668,7 +675,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
668 struct dn_ifaddr *ifa; 675 struct dn_ifaddr *ifa;
669 int err; 676 int err;
670 677
671 if (net != &init_net) 678 if (!net_eq(net, &init_net))
672 return -EINVAL; 679 return -EINVAL;
673 680
674 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); 681 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
@@ -782,7 +789,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
782 struct dn_dev *dn_db; 789 struct dn_dev *dn_db;
783 struct dn_ifaddr *ifa; 790 struct dn_ifaddr *ifa;
784 791
785 if (net != &init_net) 792 if (!net_eq(net, &init_net))
786 return 0; 793 return 0;
787 794
788 skip_ndevs = cb->args[0]; 795 skip_ndevs = cb->args[0];
@@ -826,13 +833,17 @@ static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
826 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 833 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
827 struct dn_ifaddr *ifa; 834 struct dn_ifaddr *ifa;
828 int rv = -ENODEV; 835 int rv = -ENODEV;
836
829 if (dn_db == NULL) 837 if (dn_db == NULL)
830 goto out; 838 goto out;
839
840 rtnl_lock();
831 ifa = dn_db->ifa_list; 841 ifa = dn_db->ifa_list;
832 if (ifa != NULL) { 842 if (ifa != NULL) {
833 *addr = ifa->ifa_local; 843 *addr = ifa->ifa_local;
834 rv = 0; 844 rv = 0;
835 } 845 }
846 rtnl_unlock();
836out: 847out:
837 return rv; 848 return rv;
838} 849}
@@ -854,9 +865,7 @@ int dn_dev_bind_default(__le16 *addr)
854 dev = dn_dev_get_default(); 865 dev = dn_dev_get_default();
855last_chance: 866last_chance:
856 if (dev) { 867 if (dev) {
857 read_lock(&dev_base_lock);
858 rv = dn_dev_get_first(dev, addr); 868 rv = dn_dev_get_first(dev, addr);
859 read_unlock(&dev_base_lock);
860 dev_put(dev); 869 dev_put(dev);
861 if (rv == 0 || dev == init_net.loopback_dev) 870 if (rv == 0 || dev == init_net.loopback_dev)
862 return rv; 871 return rv;
@@ -1321,18 +1330,18 @@ static inline int is_dn_dev(struct net_device *dev)
1321} 1330}
1322 1331
1323static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) 1332static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
1324 __acquires(&dev_base_lock) 1333 __acquires(rcu)
1325{ 1334{
1326 int i; 1335 int i;
1327 struct net_device *dev; 1336 struct net_device *dev;
1328 1337
1329 read_lock(&dev_base_lock); 1338 rcu_read_lock();
1330 1339
1331 if (*pos == 0) 1340 if (*pos == 0)
1332 return SEQ_START_TOKEN; 1341 return SEQ_START_TOKEN;
1333 1342
1334 i = 1; 1343 i = 1;
1335 for_each_netdev(&init_net, dev) { 1344 for_each_netdev_rcu(&init_net, dev) {
1336 if (!is_dn_dev(dev)) 1345 if (!is_dn_dev(dev))
1337 continue; 1346 continue;
1338 1347
@@ -1353,7 +1362,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1353 if (v == SEQ_START_TOKEN) 1362 if (v == SEQ_START_TOKEN)
1354 dev = net_device_entry(&init_net.dev_base_head); 1363 dev = net_device_entry(&init_net.dev_base_head);
1355 1364
1356 for_each_netdev_continue(&init_net, dev) { 1365 for_each_netdev_continue_rcu(&init_net, dev) {
1357 if (!is_dn_dev(dev)) 1366 if (!is_dn_dev(dev))
1358 continue; 1367 continue;
1359 1368
@@ -1364,9 +1373,9 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1364} 1373}
1365 1374
1366static void dn_dev_seq_stop(struct seq_file *seq, void *v) 1375static void dn_dev_seq_stop(struct seq_file *seq, void *v)
1367 __releases(&dev_base_lock) 1376 __releases(rcu)
1368{ 1377{
1369 read_unlock(&dev_base_lock); 1378 rcu_read_unlock();
1370} 1379}
1371 1380
1372static char *dn_type2asc(char type) 1381static char *dn_type2asc(char type)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 27ea2e9b080a..e9d48700e83a 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -509,7 +509,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
509 struct rtattr **rta = arg; 509 struct rtattr **rta = arg;
510 struct rtmsg *r = NLMSG_DATA(nlh); 510 struct rtmsg *r = NLMSG_DATA(nlh);
511 511
512 if (net != &init_net) 512 if (!net_eq(net, &init_net))
513 return -EINVAL; 513 return -EINVAL;
514 514
515 if (dn_fib_check_attr(r, rta)) 515 if (dn_fib_check_attr(r, rta))
@@ -529,7 +529,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
529 struct rtattr **rta = arg; 529 struct rtattr **rta = arg;
530 struct rtmsg *r = NLMSG_DATA(nlh); 530 struct rtmsg *r = NLMSG_DATA(nlh);
531 531
532 if (net != &init_net) 532 if (!net_eq(net, &init_net))
533 return -EINVAL; 533 return -EINVAL;
534 534
535 if (dn_fib_check_attr(r, rta)) 535 if (dn_fib_check_attr(r, rta))
@@ -607,8 +607,8 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
607 ASSERT_RTNL(); 607 ASSERT_RTNL();
608 608
609 /* Scan device list */ 609 /* Scan device list */
610 read_lock(&dev_base_lock); 610 rcu_read_lock();
611 for_each_netdev(&init_net, dev) { 611 for_each_netdev_rcu(&init_net, dev) {
612 dn_db = dev->dn_ptr; 612 dn_db = dev->dn_ptr;
613 if (dn_db == NULL) 613 if (dn_db == NULL)
614 continue; 614 continue;
@@ -619,7 +619,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
619 } 619 }
620 } 620 }
621 } 621 }
622 read_unlock(&dev_base_lock); 622 rcu_read_unlock();
623 623
624 if (found_it == 0) { 624 if (found_it == 0) {
625 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); 625 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 57662cabaf9b..a03284061a31 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -908,8 +908,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
908 dev_put(dev_out); 908 dev_put(dev_out);
909 goto out; 909 goto out;
910 } 910 }
911 read_lock(&dev_base_lock); 911 rcu_read_lock();
912 for_each_netdev(&init_net, dev) { 912 for_each_netdev_rcu(&init_net, dev) {
913 if (!dev->dn_ptr) 913 if (!dev->dn_ptr)
914 continue; 914 continue;
915 if (!dn_dev_islocal(dev, oldflp->fld_src)) 915 if (!dn_dev_islocal(dev, oldflp->fld_src))
@@ -922,7 +922,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
922 dev_out = dev; 922 dev_out = dev;
923 break; 923 break;
924 } 924 }
925 read_unlock(&dev_base_lock); 925 rcu_read_unlock();
926 if (dev_out == NULL) 926 if (dev_out == NULL)
927 goto out; 927 goto out;
928 dev_hold(dev_out); 928 dev_hold(dev_out);
@@ -1517,7 +1517,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1517 struct sk_buff *skb; 1517 struct sk_buff *skb;
1518 struct flowi fl; 1518 struct flowi fl;
1519 1519
1520 if (net != &init_net) 1520 if (!net_eq(net, &init_net))
1521 return -EINVAL; 1521 return -EINVAL;
1522 1522
1523 memset(&fl, 0, sizeof(fl)); 1523 memset(&fl, 0, sizeof(fl));
@@ -1602,7 +1602,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1602 int h, s_h; 1602 int h, s_h;
1603 int idx, s_idx; 1603 int idx, s_idx;
1604 1604
1605 if (net != &init_net) 1605 if (!net_eq(net, &init_net))
1606 return 0; 1606 return 0;
1607 1607
1608 if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg)) 1608 if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 72495f25269f..7466c546f286 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -33,7 +33,7 @@
33#include <net/dn_dev.h> 33#include <net/dn_dev.h>
34#include <net/dn_route.h> 34#include <net/dn_route.h>
35 35
36static struct fib_rules_ops dn_fib_rules_ops; 36static struct fib_rules_ops *dn_fib_rules_ops;
37 37
38struct dn_fib_rule 38struct dn_fib_rule
39{ 39{
@@ -56,7 +56,7 @@ int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
56 }; 56 };
57 int err; 57 int err;
58 58
59 err = fib_rules_lookup(&dn_fib_rules_ops, flp, 0, &arg); 59 err = fib_rules_lookup(dn_fib_rules_ops, flp, 0, &arg);
60 res->r = arg.rule; 60 res->r = arg.rule;
61 61
62 return err; 62 return err;
@@ -217,9 +217,9 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
217 struct list_head *pos; 217 struct list_head *pos;
218 struct fib_rule *rule; 218 struct fib_rule *rule;
219 219
220 if (!list_empty(&dn_fib_rules_ops.rules_list)) { 220 if (!list_empty(&dn_fib_rules_ops->rules_list)) {
221 pos = dn_fib_rules_ops.rules_list.next; 221 pos = dn_fib_rules_ops->rules_list.next;
222 if (pos->next != &dn_fib_rules_ops.rules_list) { 222 if (pos->next != &dn_fib_rules_ops->rules_list) {
223 rule = list_entry(pos->next, struct fib_rule, list); 223 rule = list_entry(pos->next, struct fib_rule, list);
224 if (rule->pref) 224 if (rule->pref)
225 return rule->pref - 1; 225 return rule->pref - 1;
@@ -234,7 +234,7 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
234 dn_rt_cache_flush(-1); 234 dn_rt_cache_flush(-1);
235} 235}
236 236
237static struct fib_rules_ops dn_fib_rules_ops = { 237static struct fib_rules_ops dn_fib_rules_ops_template = {
238 .family = AF_DECnet, 238 .family = AF_DECnet,
239 .rule_size = sizeof(struct dn_fib_rule), 239 .rule_size = sizeof(struct dn_fib_rule),
240 .addr_size = sizeof(u16), 240 .addr_size = sizeof(u16),
@@ -247,21 +247,23 @@ static struct fib_rules_ops dn_fib_rules_ops = {
247 .flush_cache = dn_fib_rule_flush_cache, 247 .flush_cache = dn_fib_rule_flush_cache,
248 .nlgroup = RTNLGRP_DECnet_RULE, 248 .nlgroup = RTNLGRP_DECnet_RULE,
249 .policy = dn_fib_rule_policy, 249 .policy = dn_fib_rule_policy,
250 .rules_list = LIST_HEAD_INIT(dn_fib_rules_ops.rules_list),
251 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
252 .fro_net = &init_net, 251 .fro_net = &init_net,
253}; 252};
254 253
255void __init dn_fib_rules_init(void) 254void __init dn_fib_rules_init(void)
256{ 255{
257 BUG_ON(fib_default_rule_add(&dn_fib_rules_ops, 0x7fff, 256 dn_fib_rules_ops =
257 fib_rules_register(&dn_fib_rules_ops_template, &init_net);
258 BUG_ON(IS_ERR(dn_fib_rules_ops));
259 BUG_ON(fib_default_rule_add(dn_fib_rules_ops, 0x7fff,
258 RT_TABLE_MAIN, 0)); 260 RT_TABLE_MAIN, 0));
259 fib_rules_register(&dn_fib_rules_ops);
260} 261}
261 262
262void __exit dn_fib_rules_cleanup(void) 263void __exit dn_fib_rules_cleanup(void)
263{ 264{
264 fib_rules_unregister(&dn_fib_rules_ops); 265 fib_rules_unregister(dn_fib_rules_ops);
266 rcu_barrier();
265} 267}
266 268
267 269
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 67054b0d550f..b9a33bb5e9cc 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -471,7 +471,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
471 struct hlist_node *node; 471 struct hlist_node *node;
472 int dumped = 0; 472 int dumped = 0;
473 473
474 if (net != &init_net) 474 if (!net_eq(net, &init_net))
475 return 0; 475 return 0;
476 476
477 if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) && 477 if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
@@ -581,8 +581,9 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
581 DN_FIB_SCAN_KEY(f, fp, key) { 581 DN_FIB_SCAN_KEY(f, fp, key) {
582 if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority) 582 if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
583 break; 583 break;
584 if (f->fn_type == type && f->fn_scope == r->rtm_scope 584 if (f->fn_type == type &&
585 && DN_FIB_INFO(f) == fi) 585 f->fn_scope == r->rtm_scope &&
586 DN_FIB_INFO(f) == fi)
586 goto out; 587 goto out;
587 } 588 }
588 589
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 26b0ab1e9f56..2036568beea9 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -263,11 +263,10 @@ static int dn_def_dev_strategy(ctl_table *table,
263 return -ENODEV; 263 return -ENODEV;
264 264
265 rv = -ENODEV; 265 rv = -ENODEV;
266 if (dev->dn_ptr != NULL) { 266 if (dev->dn_ptr != NULL)
267 rv = dn_dev_set_default(dev, 1); 267 rv = dn_dev_set_default(dev, 1);
268 if (rv) 268 if (rv)
269 dev_put(dev); 269 dev_put(dev);
270 }
271 } 270 }
272 271
273 return rv; 272 return rv;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 5e9426a11c3e..29b4931aae52 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -605,13 +605,14 @@ static struct proto econet_proto = {
605 * Create an Econet socket 605 * Create an Econet socket
606 */ 606 */
607 607
608static int econet_create(struct net *net, struct socket *sock, int protocol) 608static int econet_create(struct net *net, struct socket *sock, int protocol,
609 int kern)
609{ 610{
610 struct sock *sk; 611 struct sock *sk;
611 struct econet_sock *eo; 612 struct econet_sock *eo;
612 int err; 613 int err;
613 614
614 if (net != &init_net) 615 if (!net_eq(net, &init_net))
615 return -EAFNOSUPPORT; 616 return -EAFNOSUPPORT;
616 617
617 /* Econet only provides datagram services. */ 618 /* Econet only provides datagram services. */
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5a883affecd3..dd3db88f8f0a 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -393,10 +393,3 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
393 return ((ssize_t) l); 393 return ((ssize_t) l);
394} 394}
395EXPORT_SYMBOL(sysfs_format_mac); 395EXPORT_SYMBOL(sysfs_format_mac);
396
397char *print_mac(char *buf, const unsigned char *addr)
398{
399 _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
400 return buf;
401}
402EXPORT_SYMBOL(print_mac);
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 4068a9f5113e..ce2d33582859 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2nl802154-y := netlink.o nl_policy.o 2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3af_802154-y := af_ieee802154.o raw.o dgram.o
4 4
5ccflags-y += -Wall -DDEBUG 5ccflags-y += -Wall -DDEBUG
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 309348fba72b..bad1c49fd960 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -234,14 +234,14 @@ static const struct proto_ops ieee802154_dgram_ops = {
234 * set the state. 234 * set the state.
235 */ 235 */
236static int ieee802154_create(struct net *net, struct socket *sock, 236static int ieee802154_create(struct net *net, struct socket *sock,
237 int protocol) 237 int protocol, int kern)
238{ 238{
239 struct sock *sk; 239 struct sock *sk;
240 int rc; 240 int rc;
241 struct proto *proto; 241 struct proto *proto;
242 const struct proto_ops *ops; 242 const struct proto_ops *ops;
243 243
244 if (net != &init_net) 244 if (!net_eq(net, &init_net))
245 return -EAFNOSUPPORT; 245 return -EAFNOSUPPORT;
246 246
247 switch (sock->type) { 247 switch (sock->type) {
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
new file mode 100644
index 000000000000..aadec428e6ec
--- /dev/null
+++ b/net/ieee802154/ieee802154.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2007, 2008, 2009 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18#ifndef IEEE_802154_LOCAL_H
19#define IEEE_802154_LOCAL_H
20
21int __init ieee802154_nl_init(void);
22void __exit ieee802154_nl_exit(void);
23
24#define IEEE802154_OP(_cmd, _func) \
25 { \
26 .cmd = _cmd, \
27 .policy = ieee802154_policy, \
28 .doit = _func, \
29 .dumpit = NULL, \
30 .flags = GENL_ADMIN_PERM, \
31 }
32
33#define IEEE802154_DUMP(_cmd, _func, _dump) \
34 { \
35 .cmd = _cmd, \
36 .policy = ieee802154_policy, \
37 .doit = _func, \
38 .dumpit = _dump, \
39 }
40
41struct genl_info;
42
43struct sk_buff *ieee802154_nl_create(int flags, u8 req);
44int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group);
45struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
46 int flags, u8 req);
47int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
48
49extern struct genl_family nl802154_family;
50int nl802154_mac_register(void);
51int nl802154_phy_register(void);
52
53#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index ca767bde17a4..33137b99e471 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -23,21 +23,15 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/if_arp.h>
27#include <linux/netdevice.h>
28#include <net/netlink.h>
29#include <net/genetlink.h> 26#include <net/genetlink.h>
30#include <net/sock.h>
31#include <linux/nl802154.h> 27#include <linux/nl802154.h>
32#include <net/af_ieee802154.h> 28
33#include <net/nl802154.h> 29#include "ieee802154.h"
34#include <net/ieee802154.h>
35#include <net/ieee802154_netdev.h>
36 30
37static unsigned int ieee802154_seq_num; 31static unsigned int ieee802154_seq_num;
38static DEFINE_SPINLOCK(ieee802154_seq_lock); 32static DEFINE_SPINLOCK(ieee802154_seq_lock);
39 33
40static struct genl_family ieee802154_coordinator_family = { 34struct genl_family nl802154_family = {
41 .id = GENL_ID_GENERATE, 35 .id = GENL_ID_GENERATE,
42 .hdrsize = 0, 36 .hdrsize = 0,
43 .name = IEEE802154_NL_NAME, 37 .name = IEEE802154_NL_NAME,
@@ -45,16 +39,8 @@ static struct genl_family ieee802154_coordinator_family = {
45 .maxattr = IEEE802154_ATTR_MAX, 39 .maxattr = IEEE802154_ATTR_MAX,
46}; 40};
47 41
48static struct genl_multicast_group ieee802154_coord_mcgrp = {
49 .name = IEEE802154_MCAST_COORD_NAME,
50};
51
52static struct genl_multicast_group ieee802154_beacon_mcgrp = {
53 .name = IEEE802154_MCAST_BEACON_NAME,
54};
55
56/* Requests to userspace */ 42/* Requests to userspace */
57static struct sk_buff *ieee802154_nl_create(int flags, u8 req) 43struct sk_buff *ieee802154_nl_create(int flags, u8 req)
58{ 44{
59 void *hdr; 45 void *hdr;
60 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 46 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -65,7 +51,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
65 51
66 spin_lock_irqsave(&ieee802154_seq_lock, f); 52 spin_lock_irqsave(&ieee802154_seq_lock, f);
67 hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, 53 hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
68 &ieee802154_coordinator_family, flags, req); 54 &nl802154_family, flags, req);
69 spin_unlock_irqrestore(&ieee802154_seq_lock, f); 55 spin_unlock_irqrestore(&ieee802154_seq_lock, f);
70 if (!hdr) { 56 if (!hdr) {
71 nlmsg_free(msg); 57 nlmsg_free(msg);
@@ -75,7 +61,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
75 return msg; 61 return msg;
76} 62}
77 63
78static int ieee802154_nl_finish(struct sk_buff *msg) 64int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
79{ 65{
80 /* XXX: nlh is right at the start of msg */ 66 /* XXX: nlh is right at the start of msg */
81 void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); 67 void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
@@ -83,607 +69,70 @@ static int ieee802154_nl_finish(struct sk_buff *msg)
83 if (genlmsg_end(msg, hdr) < 0) 69 if (genlmsg_end(msg, hdr) < 0)
84 goto out; 70 goto out;
85 71
86 return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, 72 return genlmsg_multicast(msg, 0, group, GFP_ATOMIC);
87 GFP_ATOMIC);
88out: 73out:
89 nlmsg_free(msg); 74 nlmsg_free(msg);
90 return -ENOBUFS; 75 return -ENOBUFS;
91} 76}
92 77
93int ieee802154_nl_assoc_indic(struct net_device *dev, 78struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
94 struct ieee802154_addr *addr, u8 cap) 79 int flags, u8 req)
95{
96 struct sk_buff *msg;
97
98 pr_debug("%s\n", __func__);
99
100 if (addr->addr_type != IEEE802154_ADDR_LONG) {
101 pr_err("%s: received non-long source address!\n", __func__);
102 return -EINVAL;
103 }
104
105 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
106 if (!msg)
107 return -ENOBUFS;
108
109 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
110 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
111 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
112 dev->dev_addr);
113
114 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
115 addr->hwaddr);
116
117 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
118
119 return ieee802154_nl_finish(msg);
120
121nla_put_failure:
122 nlmsg_free(msg);
123 return -ENOBUFS;
124}
125EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
126
127int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
128 u8 status)
129{
130 struct sk_buff *msg;
131
132 pr_debug("%s\n", __func__);
133
134 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
135 if (!msg)
136 return -ENOBUFS;
137
138 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
139 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
140 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
141 dev->dev_addr);
142
143 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
144 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
145
146 return ieee802154_nl_finish(msg);
147
148nla_put_failure:
149 nlmsg_free(msg);
150 return -ENOBUFS;
151}
152EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
153
154int ieee802154_nl_disassoc_indic(struct net_device *dev,
155 struct ieee802154_addr *addr, u8 reason)
156{
157 struct sk_buff *msg;
158
159 pr_debug("%s\n", __func__);
160
161 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
162 if (!msg)
163 return -ENOBUFS;
164
165 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
166 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
167 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
168 dev->dev_addr);
169
170 if (addr->addr_type == IEEE802154_ADDR_LONG)
171 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
172 addr->hwaddr);
173 else
174 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
175 addr->short_addr);
176
177 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
178
179 return ieee802154_nl_finish(msg);
180
181nla_put_failure:
182 nlmsg_free(msg);
183 return -ENOBUFS;
184}
185EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
186
187int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
188{
189 struct sk_buff *msg;
190
191 pr_debug("%s\n", __func__);
192
193 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
194 if (!msg)
195 return -ENOBUFS;
196
197 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
198 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
199 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
200 dev->dev_addr);
201
202 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
203
204 return ieee802154_nl_finish(msg);
205
206nla_put_failure:
207 nlmsg_free(msg);
208 return -ENOBUFS;
209}
210EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
211
212int ieee802154_nl_beacon_indic(struct net_device *dev,
213 u16 panid, u16 coord_addr)
214{
215 struct sk_buff *msg;
216
217 pr_debug("%s\n", __func__);
218
219 msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
220 if (!msg)
221 return -ENOBUFS;
222
223 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
224 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
225 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
226 dev->dev_addr);
227 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
228 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
229
230 return ieee802154_nl_finish(msg);
231
232nla_put_failure:
233 nlmsg_free(msg);
234 return -ENOBUFS;
235}
236EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
237
238int ieee802154_nl_scan_confirm(struct net_device *dev,
239 u8 status, u8 scan_type, u32 unscanned, u8 page,
240 u8 *edl/* , struct list_head *pan_desc_list */)
241{
242 struct sk_buff *msg;
243
244 pr_debug("%s\n", __func__);
245
246 msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
247 if (!msg)
248 return -ENOBUFS;
249
250 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
251 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
252 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
253 dev->dev_addr);
254
255 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
256 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
257 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
258 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
259
260 if (edl)
261 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
262
263 return ieee802154_nl_finish(msg);
264
265nla_put_failure:
266 nlmsg_free(msg);
267 return -ENOBUFS;
268}
269EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
270
271int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
272{
273 struct sk_buff *msg;
274
275 pr_debug("%s\n", __func__);
276
277 msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
278 if (!msg)
279 return -ENOBUFS;
280
281 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
282 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
283 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
284 dev->dev_addr);
285
286 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
287
288 return ieee802154_nl_finish(msg);
289
290nla_put_failure:
291 nlmsg_free(msg);
292 return -ENOBUFS;
293}
294EXPORT_SYMBOL(ieee802154_nl_start_confirm);
295
296static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
297 u32 seq, int flags, struct net_device *dev)
298{ 80{
299 void *hdr; 81 void *hdr;
82 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
300 83
301 pr_debug("%s\n", __func__); 84 if (!msg)
302
303 hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags,
304 IEEE802154_LIST_IFACE);
305 if (!hdr)
306 goto out;
307
308 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
309 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
310
311 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
312 dev->dev_addr);
313 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
314 ieee802154_mlme_ops(dev)->get_short_addr(dev));
315 NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
316 ieee802154_mlme_ops(dev)->get_pan_id(dev));
317 return genlmsg_end(msg, hdr);
318
319nla_put_failure:
320 genlmsg_cancel(msg, hdr);
321out:
322 return -EMSGSIZE;
323}
324
325/* Requests from userspace */
326static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
327{
328 struct net_device *dev;
329
330 if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
331 char name[IFNAMSIZ + 1];
332 nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
333 sizeof(name));
334 dev = dev_get_by_name(&init_net, name);
335 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
336 dev = dev_get_by_index(&init_net,
337 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
338 else
339 return NULL;
340
341 if (!dev)
342 return NULL; 85 return NULL;
343 86
344 if (dev->type != ARPHRD_IEEE802154) { 87 hdr = genlmsg_put_reply(msg, info,
345 dev_put(dev); 88 &nl802154_family, flags, req);
89 if (!hdr) {
90 nlmsg_free(msg);
346 return NULL; 91 return NULL;
347 } 92 }
348 93
349 return dev; 94 return msg;
350}
351
352static int ieee802154_associate_req(struct sk_buff *skb,
353 struct genl_info *info)
354{
355 struct net_device *dev;
356 struct ieee802154_addr addr;
357 u8 page;
358 int ret = -EINVAL;
359
360 if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
361 !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
362 (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
363 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
364 !info->attrs[IEEE802154_ATTR_CAPABILITY])
365 return -EINVAL;
366
367 dev = ieee802154_nl_get_dev(info);
368 if (!dev)
369 return -ENODEV;
370
371 if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
372 addr.addr_type = IEEE802154_ADDR_LONG;
373 nla_memcpy(addr.hwaddr,
374 info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
375 IEEE802154_ADDR_LEN);
376 } else {
377 addr.addr_type = IEEE802154_ADDR_SHORT;
378 addr.short_addr = nla_get_u16(
379 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
380 }
381 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
382
383 if (info->attrs[IEEE802154_ATTR_PAGE])
384 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
385 else
386 page = 0;
387
388 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
389 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
390 page,
391 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
392
393 dev_put(dev);
394 return ret;
395}
396
397static int ieee802154_associate_resp(struct sk_buff *skb,
398 struct genl_info *info)
399{
400 struct net_device *dev;
401 struct ieee802154_addr addr;
402 int ret = -EINVAL;
403
404 if (!info->attrs[IEEE802154_ATTR_STATUS] ||
405 !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
406 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
407 return -EINVAL;
408
409 dev = ieee802154_nl_get_dev(info);
410 if (!dev)
411 return -ENODEV;
412
413 addr.addr_type = IEEE802154_ADDR_LONG;
414 nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
415 IEEE802154_ADDR_LEN);
416 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
417
418
419 ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
420 nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
421 nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
422
423 dev_put(dev);
424 return ret;
425}
426
427static int ieee802154_disassociate_req(struct sk_buff *skb,
428 struct genl_info *info)
429{
430 struct net_device *dev;
431 struct ieee802154_addr addr;
432 int ret = -EINVAL;
433
434 if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
435 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
436 !info->attrs[IEEE802154_ATTR_REASON])
437 return -EINVAL;
438
439 dev = ieee802154_nl_get_dev(info);
440 if (!dev)
441 return -ENODEV;
442
443 if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
444 addr.addr_type = IEEE802154_ADDR_LONG;
445 nla_memcpy(addr.hwaddr,
446 info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
447 IEEE802154_ADDR_LEN);
448 } else {
449 addr.addr_type = IEEE802154_ADDR_SHORT;
450 addr.short_addr = nla_get_u16(
451 info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
452 }
453 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
454
455 ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
456 nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
457
458 dev_put(dev);
459 return ret;
460}
461
462/*
463 * PANid, channel, beacon_order = 15, superframe_order = 15,
464 * PAN_coordinator, battery_life_extension = 0,
465 * coord_realignment = 0, security_enable = 0
466*/
467static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
468{
469 struct net_device *dev;
470 struct ieee802154_addr addr;
471
472 u8 channel, bcn_ord, sf_ord;
473 u8 page;
474 int pan_coord, blx, coord_realign;
475 int ret;
476
477 if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
478 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
479 !info->attrs[IEEE802154_ATTR_CHANNEL] ||
480 !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
481 !info->attrs[IEEE802154_ATTR_SF_ORD] ||
482 !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
483 !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
484 !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
485 )
486 return -EINVAL;
487
488 dev = ieee802154_nl_get_dev(info);
489 if (!dev)
490 return -ENODEV;
491
492 addr.addr_type = IEEE802154_ADDR_SHORT;
493 addr.short_addr = nla_get_u16(
494 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
495 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
496
497 channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
498 bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
499 sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
500 pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
501 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
502 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
503
504 if (info->attrs[IEEE802154_ATTR_PAGE])
505 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
506 else
507 page = 0;
508
509
510 if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
511 ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
512 dev_put(dev);
513 return -EINVAL;
514 }
515
516 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
517 bcn_ord, sf_ord, pan_coord, blx, coord_realign);
518
519 dev_put(dev);
520 return ret;
521}
522
523static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
524{
525 struct net_device *dev;
526 int ret;
527 u8 type;
528 u32 channels;
529 u8 duration;
530 u8 page;
531
532 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
533 !info->attrs[IEEE802154_ATTR_CHANNELS] ||
534 !info->attrs[IEEE802154_ATTR_DURATION])
535 return -EINVAL;
536
537 dev = ieee802154_nl_get_dev(info);
538 if (!dev)
539 return -ENODEV;
540
541 type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
542 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
543 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
544
545 if (info->attrs[IEEE802154_ATTR_PAGE])
546 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
547 else
548 page = 0;
549
550
551 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
552 duration);
553
554 dev_put(dev);
555 return ret;
556} 95}
557 96
558static int ieee802154_list_iface(struct sk_buff *skb, 97int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
559 struct genl_info *info)
560{ 98{
561 /* Request for interface name, index, type, IEEE address, 99 /* XXX: nlh is right at the start of msg */
562 PAN Id, short address */ 100 void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
563 struct sk_buff *msg;
564 struct net_device *dev = NULL;
565 int rc = -ENOBUFS;
566
567 pr_debug("%s\n", __func__);
568
569 dev = ieee802154_nl_get_dev(info);
570 if (!dev)
571 return -ENODEV;
572
573 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
574 if (!msg)
575 goto out_dev;
576
577 rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
578 0, dev);
579 if (rc < 0)
580 goto out_free;
581 101
582 dev_put(dev); 102 if (genlmsg_end(msg, hdr) < 0)
103 goto out;
583 104
584 return genlmsg_unicast(&init_net, msg, info->snd_pid); 105 return genlmsg_reply(msg, info);
585out_free: 106out:
586 nlmsg_free(msg); 107 nlmsg_free(msg);
587out_dev: 108 return -ENOBUFS;
588 dev_put(dev);
589 return rc;
590
591}
592
593static int ieee802154_dump_iface(struct sk_buff *skb,
594 struct netlink_callback *cb)
595{
596 struct net *net = sock_net(skb->sk);
597 struct net_device *dev;
598 int idx;
599 int s_idx = cb->args[0];
600
601 pr_debug("%s\n", __func__);
602
603 idx = 0;
604 for_each_netdev(net, dev) {
605 if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
606 goto cont;
607
608 if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
609 cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
610 break;
611cont:
612 idx++;
613 }
614 cb->args[0] = idx;
615
616 return skb->len;
617} 109}
618 110
619#define IEEE802154_OP(_cmd, _func) \ 111int __init ieee802154_nl_init(void)
620 { \
621 .cmd = _cmd, \
622 .policy = ieee802154_policy, \
623 .doit = _func, \
624 .dumpit = NULL, \
625 .flags = GENL_ADMIN_PERM, \
626 }
627
628#define IEEE802154_DUMP(_cmd, _func, _dump) \
629 { \
630 .cmd = _cmd, \
631 .policy = ieee802154_policy, \
632 .doit = _func, \
633 .dumpit = _dump, \
634 }
635
636static struct genl_ops ieee802154_coordinator_ops[] = {
637 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
638 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
639 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
640 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
641 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
642 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
643 ieee802154_dump_iface),
644};
645
646static int __init ieee802154_nl_init(void)
647{ 112{
648 int rc; 113 int rc;
649 int i;
650 114
651 rc = genl_register_family(&ieee802154_coordinator_family); 115 rc = genl_register_family(&nl802154_family);
652 if (rc) 116 if (rc)
653 goto fail; 117 goto fail;
654 118
655 rc = genl_register_mc_group(&ieee802154_coordinator_family, 119 rc = nl802154_mac_register();
656 &ieee802154_coord_mcgrp);
657 if (rc) 120 if (rc)
658 goto fail; 121 goto fail;
659 122
660 rc = genl_register_mc_group(&ieee802154_coordinator_family, 123 rc = nl802154_phy_register();
661 &ieee802154_beacon_mcgrp);
662 if (rc) 124 if (rc)
663 goto fail; 125 goto fail;
664 126
665
666 for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
667 rc = genl_register_ops(&ieee802154_coordinator_family,
668 &ieee802154_coordinator_ops[i]);
669 if (rc)
670 goto fail;
671 }
672
673 return 0; 127 return 0;
674 128
675fail: 129fail:
676 genl_unregister_family(&ieee802154_coordinator_family); 130 genl_unregister_family(&nl802154_family);
677 return rc; 131 return rc;
678} 132}
679module_init(ieee802154_nl_init);
680 133
681static void __exit ieee802154_nl_exit(void) 134void __exit ieee802154_nl_exit(void)
682{ 135{
683 genl_unregister_family(&ieee802154_coordinator_family); 136 genl_unregister_family(&nl802154_family);
684} 137}
685module_exit(ieee802154_nl_exit);
686
687MODULE_LICENSE("GPL v2");
688MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
689 138
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
new file mode 100644
index 000000000000..135c1678fb11
--- /dev/null
+++ b/net/ieee802154/nl-mac.c
@@ -0,0 +1,617 @@
1/*
2 * Netlink inteface for IEEE 802.15.4 stack
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 * Maxim Osipov <maxim.osipov@siemens.com>
23 */
24
25#include <linux/kernel.h>
26#include <linux/if_arp.h>
27#include <linux/netdevice.h>
28#include <net/netlink.h>
29#include <net/genetlink.h>
30#include <net/sock.h>
31#include <linux/nl802154.h>
32#include <net/af_ieee802154.h>
33#include <net/nl802154.h>
34#include <net/ieee802154.h>
35#include <net/ieee802154_netdev.h>
36#include <net/wpan-phy.h>
37
38#include "ieee802154.h"
39
40static struct genl_multicast_group ieee802154_coord_mcgrp = {
41 .name = IEEE802154_MCAST_COORD_NAME,
42};
43
44static struct genl_multicast_group ieee802154_beacon_mcgrp = {
45 .name = IEEE802154_MCAST_BEACON_NAME,
46};
47
48int ieee802154_nl_assoc_indic(struct net_device *dev,
49 struct ieee802154_addr *addr, u8 cap)
50{
51 struct sk_buff *msg;
52
53 pr_debug("%s\n", __func__);
54
55 if (addr->addr_type != IEEE802154_ADDR_LONG) {
56 pr_err("%s: received non-long source address!\n", __func__);
57 return -EINVAL;
58 }
59
60 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
61 if (!msg)
62 return -ENOBUFS;
63
64 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
65 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
66 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
67 dev->dev_addr);
68
69 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
70 addr->hwaddr);
71
72 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
73
74 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
75
76nla_put_failure:
77 nlmsg_free(msg);
78 return -ENOBUFS;
79}
80EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
81
82int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
83 u8 status)
84{
85 struct sk_buff *msg;
86
87 pr_debug("%s\n", __func__);
88
89 msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
90 if (!msg)
91 return -ENOBUFS;
92
93 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
94 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
95 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
96 dev->dev_addr);
97
98 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
99 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
100
101 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
102
103nla_put_failure:
104 nlmsg_free(msg);
105 return -ENOBUFS;
106}
107EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
108
109int ieee802154_nl_disassoc_indic(struct net_device *dev,
110 struct ieee802154_addr *addr, u8 reason)
111{
112 struct sk_buff *msg;
113
114 pr_debug("%s\n", __func__);
115
116 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
117 if (!msg)
118 return -ENOBUFS;
119
120 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
121 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
122 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
123 dev->dev_addr);
124
125 if (addr->addr_type == IEEE802154_ADDR_LONG)
126 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
127 addr->hwaddr);
128 else
129 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
130 addr->short_addr);
131
132 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
133
134 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
135
136nla_put_failure:
137 nlmsg_free(msg);
138 return -ENOBUFS;
139}
140EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
141
142int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
143{
144 struct sk_buff *msg;
145
146 pr_debug("%s\n", __func__);
147
148 msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
149 if (!msg)
150 return -ENOBUFS;
151
152 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
153 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
154 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
155 dev->dev_addr);
156
157 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
158
159 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
160
161nla_put_failure:
162 nlmsg_free(msg);
163 return -ENOBUFS;
164}
165EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
166
167int ieee802154_nl_beacon_indic(struct net_device *dev,
168 u16 panid, u16 coord_addr)
169{
170 struct sk_buff *msg;
171
172 pr_debug("%s\n", __func__);
173
174 msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
175 if (!msg)
176 return -ENOBUFS;
177
178 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
179 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
180 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
181 dev->dev_addr);
182 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
183 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
184
185 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
186
187nla_put_failure:
188 nlmsg_free(msg);
189 return -ENOBUFS;
190}
191EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
192
193int ieee802154_nl_scan_confirm(struct net_device *dev,
194 u8 status, u8 scan_type, u32 unscanned, u8 page,
195 u8 *edl/* , struct list_head *pan_desc_list */)
196{
197 struct sk_buff *msg;
198
199 pr_debug("%s\n", __func__);
200
201 msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
202 if (!msg)
203 return -ENOBUFS;
204
205 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
206 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
207 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
208 dev->dev_addr);
209
210 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
211 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
212 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
213 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
214
215 if (edl)
216 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
217
218 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
219
220nla_put_failure:
221 nlmsg_free(msg);
222 return -ENOBUFS;
223}
224EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
225
226int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
227{
228 struct sk_buff *msg;
229
230 pr_debug("%s\n", __func__);
231
232 msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
233 if (!msg)
234 return -ENOBUFS;
235
236 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
237 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
238 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
239 dev->dev_addr);
240
241 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
242
243 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
244
245nla_put_failure:
246 nlmsg_free(msg);
247 return -ENOBUFS;
248}
249EXPORT_SYMBOL(ieee802154_nl_start_confirm);
250
251static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
252 u32 seq, int flags, struct net_device *dev)
253{
254 void *hdr;
255 struct wpan_phy *phy;
256
257 pr_debug("%s\n", __func__);
258
259 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
260 IEEE802154_LIST_IFACE);
261 if (!hdr)
262 goto out;
263
264 phy = ieee802154_mlme_ops(dev)->get_phy(dev);
265 BUG_ON(!phy);
266
267 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
268 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
269 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
270
271 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
272 dev->dev_addr);
273 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
274 ieee802154_mlme_ops(dev)->get_short_addr(dev));
275 NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
276 ieee802154_mlme_ops(dev)->get_pan_id(dev));
277 wpan_phy_put(phy);
278 return genlmsg_end(msg, hdr);
279
280nla_put_failure:
281 wpan_phy_put(phy);
282 genlmsg_cancel(msg, hdr);
283out:
284 return -EMSGSIZE;
285}
286
287/* Requests from userspace */
288static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
289{
290 struct net_device *dev;
291
292 if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
293 char name[IFNAMSIZ + 1];
294 nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
295 sizeof(name));
296 dev = dev_get_by_name(&init_net, name);
297 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
298 dev = dev_get_by_index(&init_net,
299 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
300 else
301 return NULL;
302
303 if (!dev)
304 return NULL;
305
306 if (dev->type != ARPHRD_IEEE802154) {
307 dev_put(dev);
308 return NULL;
309 }
310
311 return dev;
312}
313
314static int ieee802154_associate_req(struct sk_buff *skb,
315 struct genl_info *info)
316{
317 struct net_device *dev;
318 struct ieee802154_addr addr;
319 u8 page;
320 int ret = -EINVAL;
321
322 if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
323 !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
324 (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
325 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
326 !info->attrs[IEEE802154_ATTR_CAPABILITY])
327 return -EINVAL;
328
329 dev = ieee802154_nl_get_dev(info);
330 if (!dev)
331 return -ENODEV;
332
333 if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
334 addr.addr_type = IEEE802154_ADDR_LONG;
335 nla_memcpy(addr.hwaddr,
336 info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
337 IEEE802154_ADDR_LEN);
338 } else {
339 addr.addr_type = IEEE802154_ADDR_SHORT;
340 addr.short_addr = nla_get_u16(
341 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
342 }
343 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
344
345 if (info->attrs[IEEE802154_ATTR_PAGE])
346 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
347 else
348 page = 0;
349
350 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
351 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
352 page,
353 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
354
355 dev_put(dev);
356 return ret;
357}
358
359static int ieee802154_associate_resp(struct sk_buff *skb,
360 struct genl_info *info)
361{
362 struct net_device *dev;
363 struct ieee802154_addr addr;
364 int ret = -EINVAL;
365
366 if (!info->attrs[IEEE802154_ATTR_STATUS] ||
367 !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
368 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
369 return -EINVAL;
370
371 dev = ieee802154_nl_get_dev(info);
372 if (!dev)
373 return -ENODEV;
374
375 addr.addr_type = IEEE802154_ADDR_LONG;
376 nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
377 IEEE802154_ADDR_LEN);
378 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
379
380
381 ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
382 nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
383 nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
384
385 dev_put(dev);
386 return ret;
387}
388
389static int ieee802154_disassociate_req(struct sk_buff *skb,
390 struct genl_info *info)
391{
392 struct net_device *dev;
393 struct ieee802154_addr addr;
394 int ret = -EINVAL;
395
396 if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
397 !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
398 !info->attrs[IEEE802154_ATTR_REASON])
399 return -EINVAL;
400
401 dev = ieee802154_nl_get_dev(info);
402 if (!dev)
403 return -ENODEV;
404
405 if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
406 addr.addr_type = IEEE802154_ADDR_LONG;
407 nla_memcpy(addr.hwaddr,
408 info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
409 IEEE802154_ADDR_LEN);
410 } else {
411 addr.addr_type = IEEE802154_ADDR_SHORT;
412 addr.short_addr = nla_get_u16(
413 info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
414 }
415 addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
416
417 ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
418 nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
419
420 dev_put(dev);
421 return ret;
422}
423
424/*
425 * PANid, channel, beacon_order = 15, superframe_order = 15,
426 * PAN_coordinator, battery_life_extension = 0,
427 * coord_realignment = 0, security_enable = 0
428*/
429static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
430{
431 struct net_device *dev;
432 struct ieee802154_addr addr;
433
434 u8 channel, bcn_ord, sf_ord;
435 u8 page;
436 int pan_coord, blx, coord_realign;
437 int ret;
438
439 if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
440 !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
441 !info->attrs[IEEE802154_ATTR_CHANNEL] ||
442 !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
443 !info->attrs[IEEE802154_ATTR_SF_ORD] ||
444 !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
445 !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
446 !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
447 )
448 return -EINVAL;
449
450 dev = ieee802154_nl_get_dev(info);
451 if (!dev)
452 return -ENODEV;
453
454 addr.addr_type = IEEE802154_ADDR_SHORT;
455 addr.short_addr = nla_get_u16(
456 info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
457 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
458
459 channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
460 bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
461 sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
462 pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
463 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
464 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
465
466 if (info->attrs[IEEE802154_ATTR_PAGE])
467 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
468 else
469 page = 0;
470
471
472 if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
473 ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
474 dev_put(dev);
475 return -EINVAL;
476 }
477
478 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
479 bcn_ord, sf_ord, pan_coord, blx, coord_realign);
480
481 dev_put(dev);
482 return ret;
483}
484
485static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
486{
487 struct net_device *dev;
488 int ret;
489 u8 type;
490 u32 channels;
491 u8 duration;
492 u8 page;
493
494 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
495 !info->attrs[IEEE802154_ATTR_CHANNELS] ||
496 !info->attrs[IEEE802154_ATTR_DURATION])
497 return -EINVAL;
498
499 dev = ieee802154_nl_get_dev(info);
500 if (!dev)
501 return -ENODEV;
502
503 type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
504 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
505 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
506
507 if (info->attrs[IEEE802154_ATTR_PAGE])
508 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
509 else
510 page = 0;
511
512
513 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
514 duration);
515
516 dev_put(dev);
517 return ret;
518}
519
520static int ieee802154_list_iface(struct sk_buff *skb,
521 struct genl_info *info)
522{
523 /* Request for interface name, index, type, IEEE address,
524 PAN Id, short address */
525 struct sk_buff *msg;
526 struct net_device *dev = NULL;
527 int rc = -ENOBUFS;
528
529 pr_debug("%s\n", __func__);
530
531 dev = ieee802154_nl_get_dev(info);
532 if (!dev)
533 return -ENODEV;
534
535 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
536 if (!msg)
537 goto out_dev;
538
539 rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
540 0, dev);
541 if (rc < 0)
542 goto out_free;
543
544 dev_put(dev);
545
546 return genlmsg_reply(msg, info);
547out_free:
548 nlmsg_free(msg);
549out_dev:
550 dev_put(dev);
551 return rc;
552
553}
554
555static int ieee802154_dump_iface(struct sk_buff *skb,
556 struct netlink_callback *cb)
557{
558 struct net *net = sock_net(skb->sk);
559 struct net_device *dev;
560 int idx;
561 int s_idx = cb->args[0];
562
563 pr_debug("%s\n", __func__);
564
565 idx = 0;
566 for_each_netdev(net, dev) {
567 if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
568 goto cont;
569
570 if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
571 cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
572 break;
573cont:
574 idx++;
575 }
576 cb->args[0] = idx;
577
578 return skb->len;
579}
580
581static struct genl_ops ieee802154_coordinator_ops[] = {
582 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
583 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
584 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
585 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
586 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
587 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
588 ieee802154_dump_iface),
589};
590
591/*
592 * No need to unregister as family unregistration will do it.
593 */
594int nl802154_mac_register(void)
595{
596 int i;
597 int rc;
598
599 rc = genl_register_mc_group(&nl802154_family,
600 &ieee802154_coord_mcgrp);
601 if (rc)
602 return rc;
603
604 rc = genl_register_mc_group(&nl802154_family,
605 &ieee802154_beacon_mcgrp);
606 if (rc)
607 return rc;
608
609 for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
610 rc = genl_register_ops(&nl802154_family,
611 &ieee802154_coordinator_ops[i]);
612 if (rc)
613 return rc;
614 }
615
616 return 0;
617}
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
new file mode 100644
index 000000000000..199a2d9d12f9
--- /dev/null
+++ b/net/ieee802154/nl-phy.c
@@ -0,0 +1,344 @@
1/*
2 * Netlink inteface for IEEE 802.15.4 stack
3 *
4 * Copyright 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 * Maxim Osipov <maxim.osipov@siemens.com>
23 */
24
25#include <linux/kernel.h>
26#include <net/netlink.h>
27#include <net/genetlink.h>
28#include <net/wpan-phy.h>
29#include <net/af_ieee802154.h>
30#include <net/ieee802154_netdev.h>
31#include <net/rtnetlink.h> /* for rtnl_{un,}lock */
32#include <linux/nl802154.h>
33
34#include "ieee802154.h"
35
36static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
37 u32 seq, int flags, struct wpan_phy *phy)
38{
39 void *hdr;
40 int i, pages = 0;
41 uint32_t *buf = kzalloc(32 * sizeof(uint32_t), GFP_KERNEL);
42
43 pr_debug("%s\n", __func__);
44
45 if (!buf)
46 goto out;
47
48 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
49 IEEE802154_LIST_PHY);
50 if (!hdr)
51 goto out;
52
53 mutex_lock(&phy->pib_lock);
54 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
55
56 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
57 NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
58 for (i = 0; i < 32; i++) {
59 if (phy->channels_supported[i])
60 buf[pages++] = phy->channels_supported[i] | (i << 27);
61 }
62 if (pages)
63 NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
64 pages * sizeof(uint32_t), buf);
65
66 mutex_unlock(&phy->pib_lock);
67 return genlmsg_end(msg, hdr);
68
69nla_put_failure:
70 mutex_unlock(&phy->pib_lock);
71 genlmsg_cancel(msg, hdr);
72out:
73 kfree(buf);
74 return -EMSGSIZE;
75}
76
77static int ieee802154_list_phy(struct sk_buff *skb,
78 struct genl_info *info)
79{
80 /* Request for interface name, index, type, IEEE address,
81 PAN Id, short address */
82 struct sk_buff *msg;
83 struct wpan_phy *phy;
84 const char *name;
85 int rc = -ENOBUFS;
86
87 pr_debug("%s\n", __func__);
88
89 if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
90 return -EINVAL;
91
92 name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
93 if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
94 return -EINVAL; /* phy name should be null-terminated */
95
96
97 phy = wpan_phy_find(name);
98 if (!phy)
99 return -ENODEV;
100
101 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
102 if (!msg)
103 goto out_dev;
104
105 rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq,
106 0, phy);
107 if (rc < 0)
108 goto out_free;
109
110 wpan_phy_put(phy);
111
112 return genlmsg_reply(msg, info);
113out_free:
114 nlmsg_free(msg);
115out_dev:
116 wpan_phy_put(phy);
117 return rc;
118
119}
120
121struct dump_phy_data {
122 struct sk_buff *skb;
123 struct netlink_callback *cb;
124 int idx, s_idx;
125};
126
127static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
128{
129 int rc;
130 struct dump_phy_data *data = _data;
131
132 pr_debug("%s\n", __func__);
133
134 if (data->idx++ < data->s_idx)
135 return 0;
136
137 rc = ieee802154_nl_fill_phy(data->skb,
138 NETLINK_CB(data->cb->skb).pid,
139 data->cb->nlh->nlmsg_seq,
140 NLM_F_MULTI,
141 phy);
142
143 if (rc < 0) {
144 data->idx--;
145 return rc;
146 }
147
148 return 0;
149}
150
151static int ieee802154_dump_phy(struct sk_buff *skb,
152 struct netlink_callback *cb)
153{
154 struct dump_phy_data data = {
155 .cb = cb,
156 .skb = skb,
157 .s_idx = cb->args[0],
158 .idx = 0,
159 };
160
161 pr_debug("%s\n", __func__);
162
163 wpan_phy_for_each(ieee802154_dump_phy_iter, &data);
164
165 cb->args[0] = data.idx;
166
167 return skb->len;
168}
169
170static int ieee802154_add_iface(struct sk_buff *skb,
171 struct genl_info *info)
172{
173 struct sk_buff *msg;
174 struct wpan_phy *phy;
175 const char *name;
176 const char *devname;
177 int rc = -ENOBUFS;
178 struct net_device *dev;
179
180 pr_debug("%s\n", __func__);
181
182 if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
183 return -EINVAL;
184
185 name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
186 if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
187 return -EINVAL; /* phy name should be null-terminated */
188
189 if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
190 devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
191 if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
192 != '\0')
193 return -EINVAL; /* phy name should be null-terminated */
194 } else {
195 devname = "wpan%d";
196 }
197
198 if (strlen(devname) >= IFNAMSIZ)
199 return -ENAMETOOLONG;
200
201 phy = wpan_phy_find(name);
202 if (!phy)
203 return -ENODEV;
204
205 msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
206 if (!msg)
207 goto out_dev;
208
209 if (!phy->add_iface) {
210 rc = -EINVAL;
211 goto nla_put_failure;
212 }
213
214 dev = phy->add_iface(phy, devname);
215 if (IS_ERR(dev)) {
216 rc = PTR_ERR(dev);
217 goto nla_put_failure;
218 }
219
220 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
221 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
222
223 dev_put(dev);
224
225 wpan_phy_put(phy);
226
227 return ieee802154_nl_reply(msg, info);
228
229nla_put_failure:
230 nlmsg_free(msg);
231out_dev:
232 wpan_phy_put(phy);
233 return rc;
234}
235
236static int ieee802154_del_iface(struct sk_buff *skb,
237 struct genl_info *info)
238{
239 struct sk_buff *msg;
240 struct wpan_phy *phy;
241 const char *name;
242 int rc;
243 struct net_device *dev;
244
245 pr_debug("%s\n", __func__);
246
247 if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
248 return -EINVAL;
249
250 name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
251 if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
252 return -EINVAL; /* name should be null-terminated */
253
254 dev = dev_get_by_name(genl_info_net(info), name);
255 if (!dev)
256 return -ENODEV;
257
258 phy = ieee802154_mlme_ops(dev)->get_phy(dev);
259 BUG_ON(!phy);
260
261 rc = -EINVAL;
262 /* phy name is optional, but should be checked if it's given */
263 if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
264 struct wpan_phy *phy2;
265
266 const char *pname =
267 nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
268 if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
269 != '\0')
270 /* name should be null-terminated */
271 goto out_dev;
272
273 phy2 = wpan_phy_find(pname);
274 if (!phy2)
275 goto out_dev;
276
277 if (phy != phy2) {
278 wpan_phy_put(phy2);
279 goto out_dev;
280 }
281 }
282
283 rc = -ENOBUFS;
284
285 msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
286 if (!msg)
287 goto out_dev;
288
289 if (!phy->del_iface) {
290 rc = -EINVAL;
291 goto nla_put_failure;
292 }
293
294 rtnl_lock();
295 phy->del_iface(phy, dev);
296
297 /* We don't have device anymore */
298 dev_put(dev);
299 dev = NULL;
300
301 rtnl_unlock();
302
303
304 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
305 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
306
307 wpan_phy_put(phy);
308
309 return ieee802154_nl_reply(msg, info);
310
311nla_put_failure:
312 nlmsg_free(msg);
313out_dev:
314 wpan_phy_put(phy);
315 if (dev)
316 dev_put(dev);
317
318 return rc;
319}
320
321static struct genl_ops ieee802154_phy_ops[] = {
322 IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
323 ieee802154_dump_phy),
324 IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
325 IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
326};
327
328/*
329 * No need to unregister as family unregistration will do it.
330 */
331int nl802154_phy_register(void)
332{
333 int i;
334 int rc;
335
336 for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) {
337 rc = genl_register_ops(&nl802154_family,
338 &ieee802154_phy_ops[i]);
339 if (rc)
340 return rc;
341 }
342
343 return 0;
344}
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 2363ebee02e7..6adda4d46f95 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -27,6 +27,7 @@
27const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { 27const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, 28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, 29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
30 [IEEE802154_ATTR_PHY_NAME] = { .type = NLA_STRING, },
30 31
31 [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, }, 32 [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, },
32 [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, 33 [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
@@ -50,5 +51,6 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
50 [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, }, 51 [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, },
51 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, 52 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
52 [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, 53 [IEEE802154_ATTR_ED_LIST] = { .len = 27 },
54 [IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
53}; 55};
54 56
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index f306604da67a..268691256a6d 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -22,6 +22,8 @@
22 22
23#include <net/wpan-phy.h> 23#include <net/wpan-phy.h>
24 24
25#include "ieee802154.h"
26
25#define MASTER_SHOW_COMPLEX(name, format_string, args...) \ 27#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
26static ssize_t name ## _show(struct device *dev, \ 28static ssize_t name ## _show(struct device *dev, \
27 struct device_attribute *attr, char *buf) \ 29 struct device_attribute *attr, char *buf) \
@@ -30,7 +32,7 @@ static ssize_t name ## _show(struct device *dev, \
30 int ret; \ 32 int ret; \
31 \ 33 \
32 mutex_lock(&phy->pib_lock); \ 34 mutex_lock(&phy->pib_lock); \
33 ret = sprintf(buf, format_string "\n", args); \ 35 ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
34 mutex_unlock(&phy->pib_lock); \ 36 mutex_unlock(&phy->pib_lock); \
35 return ret; \ 37 return ret; \
36} 38}
@@ -40,12 +42,30 @@ static ssize_t name ## _show(struct device *dev, \
40 42
41MASTER_SHOW(current_channel, "%d"); 43MASTER_SHOW(current_channel, "%d");
42MASTER_SHOW(current_page, "%d"); 44MASTER_SHOW(current_page, "%d");
43MASTER_SHOW(channels_supported, "%#x");
44MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB", 45MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
45 ((signed char) (phy->transmit_power << 2)) >> 2, 46 ((signed char) (phy->transmit_power << 2)) >> 2,
46 (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 ); 47 (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 );
47MASTER_SHOW(cca_mode, "%d"); 48MASTER_SHOW(cca_mode, "%d");
48 49
50static ssize_t channels_supported_show(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
54 int ret;
55 int i, len = 0;
56
57 mutex_lock(&phy->pib_lock);
58 for (i = 0; i < 32; i++) {
59 ret = snprintf(buf + len, PAGE_SIZE - len,
60 "%#09x\n", phy->channels_supported[i]);
61 if (ret < 0)
62 break;
63 len += ret;
64 }
65 mutex_unlock(&phy->pib_lock);
66 return len;
67}
68
49static struct device_attribute pmib_attrs[] = { 69static struct device_attribute pmib_attrs[] = {
50 __ATTR_RO(current_channel), 70 __ATTR_RO(current_channel),
51 __ATTR_RO(current_page), 71 __ATTR_RO(current_page),
@@ -91,6 +111,31 @@ struct wpan_phy *wpan_phy_find(const char *str)
91} 111}
92EXPORT_SYMBOL(wpan_phy_find); 112EXPORT_SYMBOL(wpan_phy_find);
93 113
114struct wpan_phy_iter_data {
115 int (*fn)(struct wpan_phy *phy, void *data);
116 void *data;
117};
118
119static int wpan_phy_iter(struct device *dev, void *_data)
120{
121 struct wpan_phy_iter_data *wpid = _data;
122 struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
123 return wpid->fn(phy, wpid->data);
124}
125
126int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
127 void *data)
128{
129 struct wpan_phy_iter_data wpid = {
130 .fn = fn,
131 .data = data,
132 };
133
134 return class_for_each_device(&wpan_phy_class, NULL,
135 &wpid, wpan_phy_iter);
136}
137EXPORT_SYMBOL(wpan_phy_for_each);
138
94static int wpan_phy_idx_valid(int idx) 139static int wpan_phy_idx_valid(int idx)
95{ 140{
96 return idx >= 0; 141 return idx >= 0;
@@ -118,14 +163,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
118 163
119 phy->dev.class = &wpan_phy_class; 164 phy->dev.class = &wpan_phy_class;
120 165
166 phy->current_channel = -1; /* not initialised */
167 phy->current_page = 0; /* for compatibility */
168
121 return phy; 169 return phy;
122} 170}
123EXPORT_SYMBOL(wpan_phy_alloc); 171EXPORT_SYMBOL(wpan_phy_alloc);
124 172
125int wpan_phy_register(struct device *parent, struct wpan_phy *phy) 173int wpan_phy_register(struct wpan_phy *phy)
126{ 174{
127 phy->dev.parent = parent;
128
129 return device_add(&phy->dev); 175 return device_add(&phy->dev);
130} 176}
131EXPORT_SYMBOL(wpan_phy_register); 177EXPORT_SYMBOL(wpan_phy_register);
@@ -144,16 +190,31 @@ EXPORT_SYMBOL(wpan_phy_free);
144 190
145static int __init wpan_phy_class_init(void) 191static int __init wpan_phy_class_init(void)
146{ 192{
147 return class_register(&wpan_phy_class); 193 int rc;
194 rc = class_register(&wpan_phy_class);
195 if (rc)
196 goto err;
197
198 rc = ieee802154_nl_init();
199 if (rc)
200 goto err_nl;
201
202 return 0;
203err_nl:
204 class_unregister(&wpan_phy_class);
205err:
206 return rc;
148} 207}
149subsys_initcall(wpan_phy_class_init); 208subsys_initcall(wpan_phy_class_init);
150 209
151static void __exit wpan_phy_class_exit(void) 210static void __exit wpan_phy_class_exit(void)
152{ 211{
212 ieee802154_nl_exit();
153 class_unregister(&wpan_phy_class); 213 class_unregister(&wpan_phy_class);
154} 214}
155module_exit(wpan_phy_class_exit); 215module_exit(wpan_phy_class_exit);
156 216
157MODULE_DESCRIPTION("IEEE 802.15.4 device class");
158MODULE_LICENSE("GPL v2"); 217MODULE_LICENSE("GPL v2");
218MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
219MODULE_AUTHOR("Dmitry Eremin-Solenikov");
159 220
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 04a14b1600ac..7d12c6a9b19b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -262,7 +262,8 @@ static inline int inet_netns_ok(struct net *net, int protocol)
262 * Create an inet socket. 262 * Create an inet socket.
263 */ 263 */
264 264
265static int inet_create(struct net *net, struct socket *sock, int protocol) 265static int inet_create(struct net *net, struct socket *sock, int protocol,
266 int kern)
266{ 267{
267 struct sock *sk; 268 struct sock *sk;
268 struct inet_protosw *answer; 269 struct inet_protosw *answer;
@@ -325,7 +326,7 @@ lookup_protocol:
325 } 326 }
326 327
327 err = -EPERM; 328 err = -EPERM;
328 if (answer->capability > 0 && !capable(answer->capability)) 329 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
329 goto out_rcu_unlock; 330 goto out_rcu_unlock;
330 331
331 err = -EAFNOSUPPORT; 332 err = -EAFNOSUPPORT;
@@ -685,7 +686,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
685{ 686{
686 struct sock *sk = sock->sk; 687 struct sock *sk = sock->sk;
687 struct inet_sock *inet = inet_sk(sk); 688 struct inet_sock *inet = inet_sk(sk);
688 struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; 689 DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
689 690
690 sin->sin_family = AF_INET; 691 sin->sin_family = AF_INET;
691 if (peer) { 692 if (peer) {
@@ -947,7 +948,6 @@ static struct inet_protosw inetsw_array[] =
947 .protocol = IPPROTO_TCP, 948 .protocol = IPPROTO_TCP,
948 .prot = &tcp_prot, 949 .prot = &tcp_prot,
949 .ops = &inet_stream_ops, 950 .ops = &inet_stream_ops,
950 .capability = -1,
951 .no_check = 0, 951 .no_check = 0,
952 .flags = INET_PROTOSW_PERMANENT | 952 .flags = INET_PROTOSW_PERMANENT |
953 INET_PROTOSW_ICSK, 953 INET_PROTOSW_ICSK,
@@ -958,7 +958,6 @@ static struct inet_protosw inetsw_array[] =
958 .protocol = IPPROTO_UDP, 958 .protocol = IPPROTO_UDP,
959 .prot = &udp_prot, 959 .prot = &udp_prot,
960 .ops = &inet_dgram_ops, 960 .ops = &inet_dgram_ops,
961 .capability = -1,
962 .no_check = UDP_CSUM_DEFAULT, 961 .no_check = UDP_CSUM_DEFAULT,
963 .flags = INET_PROTOSW_PERMANENT, 962 .flags = INET_PROTOSW_PERMANENT,
964 }, 963 },
@@ -969,7 +968,6 @@ static struct inet_protosw inetsw_array[] =
969 .protocol = IPPROTO_IP, /* wild card */ 968 .protocol = IPPROTO_IP, /* wild card */
970 .prot = &raw_prot, 969 .prot = &raw_prot,
971 .ops = &inet_sockraw_ops, 970 .ops = &inet_sockraw_ops,
972 .capability = CAP_NET_RAW,
973 .no_check = UDP_CSUM_DEFAULT, 971 .no_check = UDP_CSUM_DEFAULT,
974 .flags = INET_PROTOSW_REUSE, 972 .flags = INET_PROTOSW_REUSE,
975 } 973 }
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index d07b0c1dd350..7ed3e4ae93ae 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -444,7 +444,7 @@ static int ah_init_state(struct xfrm_state *x)
444 } 444 }
445 445
446 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 446 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
447 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 447 ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
448 448
449 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 449 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
450 450
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5df2f6a0b0f0..e3126612fcbb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -140,11 +140,11 @@ void in_dev_finish_destroy(struct in_device *idev)
140#endif 140#endif
141 dev_put(dev); 141 dev_put(dev);
142 if (!idev->dead) 142 if (!idev->dead)
143 printk("Freeing alive in_device %p\n", idev); 143 pr_err("Freeing alive in_device %p\n", idev);
144 else { 144 else
145 kfree(idev); 145 kfree(idev);
146 }
147} 146}
147EXPORT_SYMBOL(in_dev_finish_destroy);
148 148
149static struct in_device *inetdev_init(struct net_device *dev) 149static struct in_device *inetdev_init(struct net_device *dev)
150{ 150{
@@ -159,7 +159,8 @@ static struct in_device *inetdev_init(struct net_device *dev)
159 sizeof(in_dev->cnf)); 159 sizeof(in_dev->cnf));
160 in_dev->cnf.sysctl = NULL; 160 in_dev->cnf.sysctl = NULL;
161 in_dev->dev = dev; 161 in_dev->dev = dev;
162 if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) 162 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
163 if (!in_dev->arp_parms)
163 goto out_kfree; 164 goto out_kfree;
164 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) 165 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
165 dev_disable_lro(dev); 166 dev_disable_lro(dev);
@@ -405,13 +406,15 @@ struct in_device *inetdev_by_index(struct net *net, int ifindex)
405{ 406{
406 struct net_device *dev; 407 struct net_device *dev;
407 struct in_device *in_dev = NULL; 408 struct in_device *in_dev = NULL;
408 read_lock(&dev_base_lock); 409
409 dev = __dev_get_by_index(net, ifindex); 410 rcu_read_lock();
411 dev = dev_get_by_index_rcu(net, ifindex);
410 if (dev) 412 if (dev)
411 in_dev = in_dev_get(dev); 413 in_dev = in_dev_get(dev);
412 read_unlock(&dev_base_lock); 414 rcu_read_unlock();
413 return in_dev; 415 return in_dev;
414} 416}
417EXPORT_SYMBOL(inetdev_by_index);
415 418
416/* Called only from RTNL semaphored context. No locks. */ 419/* Called only from RTNL semaphored context. No locks. */
417 420
@@ -557,7 +560,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
557 * Determine a default network mask, based on the IP address. 560 * Determine a default network mask, based on the IP address.
558 */ 561 */
559 562
560static __inline__ int inet_abc_len(__be32 addr) 563static inline int inet_abc_len(__be32 addr)
561{ 564{
562 int rc = -1; /* Something else, probably a multicast. */ 565 int rc = -1; /* Something else, probably a multicast. */
563 566
@@ -646,13 +649,15 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
646 rtnl_lock(); 649 rtnl_lock();
647 650
648 ret = -ENODEV; 651 ret = -ENODEV;
649 if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL) 652 dev = __dev_get_by_name(net, ifr.ifr_name);
653 if (!dev)
650 goto done; 654 goto done;
651 655
652 if (colon) 656 if (colon)
653 *colon = ':'; 657 *colon = ':';
654 658
655 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { 659 in_dev = __in_dev_get_rtnl(dev);
660 if (in_dev) {
656 if (tryaddrmatch) { 661 if (tryaddrmatch) {
657 /* Matthias Andree */ 662 /* Matthias Andree */
658 /* compare label and address (4.4BSD style) */ 663 /* compare label and address (4.4BSD style) */
@@ -720,7 +725,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
720 725
721 if (!ifa) { 726 if (!ifa) {
722 ret = -ENOBUFS; 727 ret = -ENOBUFS;
723 if ((ifa = inet_alloc_ifa()) == NULL) 728 ifa = inet_alloc_ifa();
729 if (!ifa)
724 break; 730 break;
725 if (colon) 731 if (colon)
726 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); 732 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
@@ -822,10 +828,10 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
822 struct ifreq ifr; 828 struct ifreq ifr;
823 int done = 0; 829 int done = 0;
824 830
825 if (!in_dev || (ifa = in_dev->ifa_list) == NULL) 831 if (!in_dev)
826 goto out; 832 goto out;
827 833
828 for (; ifa; ifa = ifa->ifa_next) { 834 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
829 if (!buf) { 835 if (!buf) {
830 done += sizeof(ifr); 836 done += sizeof(ifr);
831 continue; 837 continue;
@@ -875,36 +881,33 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
875 if (!addr) 881 if (!addr)
876 addr = ifa->ifa_local; 882 addr = ifa->ifa_local;
877 } endfor_ifa(in_dev); 883 } endfor_ifa(in_dev);
878no_in_dev:
879 rcu_read_unlock();
880 884
881 if (addr) 885 if (addr)
882 goto out; 886 goto out_unlock;
887no_in_dev:
883 888
884 /* Not loopback addresses on loopback should be preferred 889 /* Not loopback addresses on loopback should be preferred
885 in this case. It is importnat that lo is the first interface 890 in this case. It is importnat that lo is the first interface
886 in dev_base list. 891 in dev_base list.
887 */ 892 */
888 read_lock(&dev_base_lock); 893 for_each_netdev_rcu(net, dev) {
889 rcu_read_lock(); 894 in_dev = __in_dev_get_rcu(dev);
890 for_each_netdev(net, dev) { 895 if (!in_dev)
891 if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
892 continue; 896 continue;
893 897
894 for_primary_ifa(in_dev) { 898 for_primary_ifa(in_dev) {
895 if (ifa->ifa_scope != RT_SCOPE_LINK && 899 if (ifa->ifa_scope != RT_SCOPE_LINK &&
896 ifa->ifa_scope <= scope) { 900 ifa->ifa_scope <= scope) {
897 addr = ifa->ifa_local; 901 addr = ifa->ifa_local;
898 goto out_unlock_both; 902 goto out_unlock;
899 } 903 }
900 } endfor_ifa(in_dev); 904 } endfor_ifa(in_dev);
901 } 905 }
902out_unlock_both: 906out_unlock:
903 read_unlock(&dev_base_lock);
904 rcu_read_unlock(); 907 rcu_read_unlock();
905out:
906 return addr; 908 return addr;
907} 909}
910EXPORT_SYMBOL(inet_select_addr);
908 911
909static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst, 912static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
910 __be32 local, int scope) 913 __be32 local, int scope)
@@ -940,7 +943,7 @@ static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
940 } 943 }
941 } endfor_ifa(in_dev); 944 } endfor_ifa(in_dev);
942 945
943 return same? addr : 0; 946 return same ? addr : 0;
944} 947}
945 948
946/* 949/*
@@ -961,17 +964,16 @@ __be32 inet_confirm_addr(struct in_device *in_dev,
961 return confirm_addr_indev(in_dev, dst, local, scope); 964 return confirm_addr_indev(in_dev, dst, local, scope);
962 965
963 net = dev_net(in_dev->dev); 966 net = dev_net(in_dev->dev);
964 read_lock(&dev_base_lock);
965 rcu_read_lock(); 967 rcu_read_lock();
966 for_each_netdev(net, dev) { 968 for_each_netdev_rcu(net, dev) {
967 if ((in_dev = __in_dev_get_rcu(dev))) { 969 in_dev = __in_dev_get_rcu(dev);
970 if (in_dev) {
968 addr = confirm_addr_indev(in_dev, dst, local, scope); 971 addr = confirm_addr_indev(in_dev, dst, local, scope);
969 if (addr) 972 if (addr)
970 break; 973 break;
971 } 974 }
972 } 975 }
973 rcu_read_unlock(); 976 rcu_read_unlock();
974 read_unlock(&dev_base_lock);
975 977
976 return addr; 978 return addr;
977} 979}
@@ -984,14 +986,16 @@ int register_inetaddr_notifier(struct notifier_block *nb)
984{ 986{
985 return blocking_notifier_chain_register(&inetaddr_chain, nb); 987 return blocking_notifier_chain_register(&inetaddr_chain, nb);
986} 988}
989EXPORT_SYMBOL(register_inetaddr_notifier);
987 990
988int unregister_inetaddr_notifier(struct notifier_block *nb) 991int unregister_inetaddr_notifier(struct notifier_block *nb)
989{ 992{
990 return blocking_notifier_chain_unregister(&inetaddr_chain, nb); 993 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
991} 994}
995EXPORT_SYMBOL(unregister_inetaddr_notifier);
992 996
993/* Rename ifa_labels for a device name change. Make some effort to preserve existing 997/* Rename ifa_labels for a device name change. Make some effort to preserve
994 * alias numbering and to create unique labels if possible. 998 * existing alias numbering and to create unique labels if possible.
995*/ 999*/
996static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) 1000static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
997{ 1001{
@@ -1010,11 +1014,10 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1010 sprintf(old, ":%d", named); 1014 sprintf(old, ":%d", named);
1011 dot = old; 1015 dot = old;
1012 } 1016 }
1013 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) { 1017 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1014 strcat(ifa->ifa_label, dot); 1018 strcat(ifa->ifa_label, dot);
1015 } else { 1019 else
1016 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); 1020 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1017 }
1018skip: 1021skip:
1019 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); 1022 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1020 } 1023 }
@@ -1061,8 +1064,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1061 if (!inetdev_valid_mtu(dev->mtu)) 1064 if (!inetdev_valid_mtu(dev->mtu))
1062 break; 1065 break;
1063 if (dev->flags & IFF_LOOPBACK) { 1066 if (dev->flags & IFF_LOOPBACK) {
1064 struct in_ifaddr *ifa; 1067 struct in_ifaddr *ifa = inet_alloc_ifa();
1065 if ((ifa = inet_alloc_ifa()) != NULL) { 1068
1069 if (ifa) {
1066 ifa->ifa_local = 1070 ifa->ifa_local =
1067 ifa->ifa_address = htonl(INADDR_LOOPBACK); 1071 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1068 ifa->ifa_prefixlen = 8; 1072 ifa->ifa_prefixlen = 8;
@@ -1170,38 +1174,54 @@ nla_put_failure:
1170static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 1174static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1171{ 1175{
1172 struct net *net = sock_net(skb->sk); 1176 struct net *net = sock_net(skb->sk);
1173 int idx, ip_idx; 1177 int h, s_h;
1178 int idx, s_idx;
1179 int ip_idx, s_ip_idx;
1174 struct net_device *dev; 1180 struct net_device *dev;
1175 struct in_device *in_dev; 1181 struct in_device *in_dev;
1176 struct in_ifaddr *ifa; 1182 struct in_ifaddr *ifa;
1177 int s_ip_idx, s_idx = cb->args[0]; 1183 struct hlist_head *head;
1184 struct hlist_node *node;
1178 1185
1179 s_ip_idx = ip_idx = cb->args[1]; 1186 s_h = cb->args[0];
1180 idx = 0; 1187 s_idx = idx = cb->args[1];
1181 for_each_netdev(net, dev) { 1188 s_ip_idx = ip_idx = cb->args[2];
1182 if (idx < s_idx) 1189
1183 goto cont; 1190 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1184 if (idx > s_idx) 1191 idx = 0;
1185 s_ip_idx = 0; 1192 head = &net->dev_index_head[h];
1186 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) 1193 rcu_read_lock();
1187 goto cont; 1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1188 1195 if (idx < s_idx)
1189 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; 1196 goto cont;
1190 ifa = ifa->ifa_next, ip_idx++) { 1197 if (idx > s_idx)
1191 if (ip_idx < s_ip_idx) 1198 s_ip_idx = 0;
1192 continue; 1199 in_dev = __in_dev_get_rcu(dev);
1193 if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, 1200 if (!in_dev)
1201 goto cont;
1202
1203 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1204 ifa = ifa->ifa_next, ip_idx++) {
1205 if (ip_idx < s_ip_idx)
1206 continue;
1207 if (inet_fill_ifaddr(skb, ifa,
1208 NETLINK_CB(cb->skb).pid,
1194 cb->nlh->nlmsg_seq, 1209 cb->nlh->nlmsg_seq,
1195 RTM_NEWADDR, NLM_F_MULTI) <= 0) 1210 RTM_NEWADDR, NLM_F_MULTI) <= 0) {
1196 goto done; 1211 rcu_read_unlock();
1197 } 1212 goto done;
1213 }
1214 }
1198cont: 1215cont:
1199 idx++; 1216 idx++;
1217 }
1218 rcu_read_unlock();
1200 } 1219 }
1201 1220
1202done: 1221done:
1203 cb->args[0] = idx; 1222 cb->args[0] = h;
1204 cb->args[1] = ip_idx; 1223 cb->args[1] = idx;
1224 cb->args[2] = ip_idx;
1205 1225
1206 return skb->len; 1226 return skb->len;
1207} 1227}
@@ -1239,18 +1259,18 @@ static void devinet_copy_dflt_conf(struct net *net, int i)
1239{ 1259{
1240 struct net_device *dev; 1260 struct net_device *dev;
1241 1261
1242 read_lock(&dev_base_lock); 1262 rcu_read_lock();
1243 for_each_netdev(net, dev) { 1263 for_each_netdev_rcu(net, dev) {
1244 struct in_device *in_dev; 1264 struct in_device *in_dev;
1245 rcu_read_lock(); 1265
1246 in_dev = __in_dev_get_rcu(dev); 1266 in_dev = __in_dev_get_rcu(dev);
1247 if (in_dev && !test_bit(i, in_dev->cnf.state)) 1267 if (in_dev && !test_bit(i, in_dev->cnf.state))
1248 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i]; 1268 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
1249 rcu_read_unlock();
1250 } 1269 }
1251 read_unlock(&dev_base_lock); 1270 rcu_read_unlock();
1252} 1271}
1253 1272
1273/* called with RTNL locked */
1254static void inet_forward_change(struct net *net) 1274static void inet_forward_change(struct net *net)
1255{ 1275{
1256 struct net_device *dev; 1276 struct net_device *dev;
@@ -1259,7 +1279,6 @@ static void inet_forward_change(struct net *net)
1259 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; 1279 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
1260 IPV4_DEVCONF_DFLT(net, FORWARDING) = on; 1280 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
1261 1281
1262 read_lock(&dev_base_lock);
1263 for_each_netdev(net, dev) { 1282 for_each_netdev(net, dev) {
1264 struct in_device *in_dev; 1283 struct in_device *in_dev;
1265 if (on) 1284 if (on)
@@ -1270,7 +1289,6 @@ static void inet_forward_change(struct net *net)
1270 IN_DEV_CONF_SET(in_dev, FORWARDING, on); 1289 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
1271 rcu_read_unlock(); 1290 rcu_read_unlock();
1272 } 1291 }
1273 read_unlock(&dev_base_lock);
1274} 1292}
1275 1293
1276static int devinet_conf_proc(ctl_table *ctl, int write, 1294static int devinet_conf_proc(ctl_table *ctl, int write,
@@ -1450,6 +1468,7 @@ static struct devinet_sysctl_table {
1450 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"), 1468 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
1451 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, 1469 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
1452 "accept_source_route"), 1470 "accept_source_route"),
1471 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
1453 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), 1472 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
1454 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), 1473 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
1455 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), 1474 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
@@ -1587,7 +1606,7 @@ static __net_init int devinet_init_net(struct net *net)
1587 all = &ipv4_devconf; 1606 all = &ipv4_devconf;
1588 dflt = &ipv4_devconf_dflt; 1607 dflt = &ipv4_devconf_dflt;
1589 1608
1590 if (net != &init_net) { 1609 if (!net_eq(net, &init_net)) {
1591 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL); 1610 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
1592 if (all == NULL) 1611 if (all == NULL)
1593 goto err_alloc_all; 1612 goto err_alloc_all;
@@ -1680,8 +1699,3 @@ void __init devinet_init(void)
1680 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); 1699 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
1681} 1700}
1682 1701
1683EXPORT_SYMBOL(in_dev_finish_destroy);
1684EXPORT_SYMBOL(inet_select_addr);
1685EXPORT_SYMBOL(inetdev_by_index);
1686EXPORT_SYMBOL(register_inetaddr_notifier);
1687EXPORT_SYMBOL(unregister_inetaddr_notifier);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 12f7287e902d..1948895beb6d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -530,7 +530,7 @@ static int esp_init_authenc(struct xfrm_state *x)
530 } 530 }
531 531
532 err = crypto_aead_setauthsize( 532 err = crypto_aead_setauthsize(
533 aead, aalg_desc->uinfo.auth.icv_truncbits / 8); 533 aead, x->aalg->alg_trunc_len / 8);
534 if (err) 534 if (err)
535 goto free_key; 535 goto free_key;
536 } 536 }
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f73dbed0f0d7..3323168ee52d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -229,25 +229,29 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
229 */ 229 */
230 230
231int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, 231int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
232 struct net_device *dev, __be32 *spec_dst, u32 *itag) 232 struct net_device *dev, __be32 *spec_dst,
233 u32 *itag, u32 mark)
233{ 234{
234 struct in_device *in_dev; 235 struct in_device *in_dev;
235 struct flowi fl = { .nl_u = { .ip4_u = 236 struct flowi fl = { .nl_u = { .ip4_u =
236 { .daddr = src, 237 { .daddr = src,
237 .saddr = dst, 238 .saddr = dst,
238 .tos = tos } }, 239 .tos = tos } },
240 .mark = mark,
239 .iif = oif }; 241 .iif = oif };
242
240 struct fib_result res; 243 struct fib_result res;
241 int no_addr, rpf; 244 int no_addr, rpf, accept_local;
242 int ret; 245 int ret;
243 struct net *net; 246 struct net *net;
244 247
245 no_addr = rpf = 0; 248 no_addr = rpf = accept_local = 0;
246 rcu_read_lock(); 249 rcu_read_lock();
247 in_dev = __in_dev_get_rcu(dev); 250 in_dev = __in_dev_get_rcu(dev);
248 if (in_dev) { 251 if (in_dev) {
249 no_addr = in_dev->ifa_list == NULL; 252 no_addr = in_dev->ifa_list == NULL;
250 rpf = IN_DEV_RPFILTER(in_dev); 253 rpf = IN_DEV_RPFILTER(in_dev);
254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
251 } 255 }
252 rcu_read_unlock(); 256 rcu_read_unlock();
253 257
@@ -257,8 +261,10 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
257 net = dev_net(dev); 261 net = dev_net(dev);
258 if (fib_lookup(net, &fl, &res)) 262 if (fib_lookup(net, &fl, &res))
259 goto last_resort; 263 goto last_resort;
260 if (res.type != RTN_UNICAST) 264 if (res.type != RTN_UNICAST) {
261 goto e_inval_res; 265 if (res.type != RTN_LOCAL || !accept_local)
266 goto e_inval_res;
267 }
262 *spec_dst = FIB_RES_PREFSRC(res); 268 *spec_dst = FIB_RES_PREFSRC(res);
263 fib_combine_itag(itag, &res); 269 fib_combine_itag(itag, &res);
264#ifdef CONFIG_IP_ROUTE_MULTIPATH 270#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -892,11 +898,11 @@ static void nl_fib_lookup_exit(struct net *net)
892 net->ipv4.fibnl = NULL; 898 net->ipv4.fibnl = NULL;
893} 899}
894 900
895static void fib_disable_ip(struct net_device *dev, int force) 901static void fib_disable_ip(struct net_device *dev, int force, int delay)
896{ 902{
897 if (fib_sync_down_dev(dev, force)) 903 if (fib_sync_down_dev(dev, force))
898 fib_flush(dev_net(dev)); 904 fib_flush(dev_net(dev));
899 rt_cache_flush(dev_net(dev), 0); 905 rt_cache_flush(dev_net(dev), delay);
900 arp_ifdown(dev); 906 arp_ifdown(dev);
901} 907}
902 908
@@ -919,7 +925,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
919 /* Last address was deleted from this interface. 925 /* Last address was deleted from this interface.
920 Disable IP. 926 Disable IP.
921 */ 927 */
922 fib_disable_ip(dev, 1); 928 fib_disable_ip(dev, 1, 0);
923 } else { 929 } else {
924 rt_cache_flush(dev_net(dev), -1); 930 rt_cache_flush(dev_net(dev), -1);
925 } 931 }
@@ -934,7 +940,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
934 struct in_device *in_dev = __in_dev_get_rtnl(dev); 940 struct in_device *in_dev = __in_dev_get_rtnl(dev);
935 941
936 if (event == NETDEV_UNREGISTER) { 942 if (event == NETDEV_UNREGISTER) {
937 fib_disable_ip(dev, 2); 943 fib_disable_ip(dev, 2, -1);
938 return NOTIFY_DONE; 944 return NOTIFY_DONE;
939 } 945 }
940 946
@@ -952,12 +958,15 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
952 rt_cache_flush(dev_net(dev), -1); 958 rt_cache_flush(dev_net(dev), -1);
953 break; 959 break;
954 case NETDEV_DOWN: 960 case NETDEV_DOWN:
955 fib_disable_ip(dev, 0); 961 fib_disable_ip(dev, 0, 0);
956 break; 962 break;
957 case NETDEV_CHANGEMTU: 963 case NETDEV_CHANGEMTU:
958 case NETDEV_CHANGE: 964 case NETDEV_CHANGE:
959 rt_cache_flush(dev_net(dev), 0); 965 rt_cache_flush(dev_net(dev), 0);
960 break; 966 break;
967 case NETDEV_UNREGISTER_BATCH:
968 rt_cache_flush_batch();
969 break;
961 } 970 }
962 return NOTIFY_DONE; 971 return NOTIFY_DONE;
963} 972}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 835262c2b867..ca2d07b1c706 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -284,7 +284,7 @@ static int fib_default_rules_init(struct fib_rules_ops *ops)
284{ 284{
285 int err; 285 int err;
286 286
287 err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, FIB_RULE_PERMANENT); 287 err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
288 if (err < 0) 288 if (err < 0)
289 return err; 289 return err;
290 err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0); 290 err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
@@ -301,13 +301,9 @@ int __net_init fib4_rules_init(struct net *net)
301 int err; 301 int err;
302 struct fib_rules_ops *ops; 302 struct fib_rules_ops *ops;
303 303
304 ops = kmemdup(&fib4_rules_ops_template, sizeof(*ops), GFP_KERNEL); 304 ops = fib_rules_register(&fib4_rules_ops_template, net);
305 if (ops == NULL) 305 if (IS_ERR(ops))
306 return -ENOMEM; 306 return PTR_ERR(ops);
307 INIT_LIST_HEAD(&ops->rules_list);
308 ops->fro_net = net;
309
310 fib_rules_register(ops);
311 307
312 err = fib_default_rules_init(ops); 308 err = fib_default_rules_init(ops);
313 if (err < 0) 309 if (err < 0)
@@ -318,12 +314,10 @@ int __net_init fib4_rules_init(struct net *net)
318fail: 314fail:
319 /* also cleans all rules already added */ 315 /* also cleans all rules already added */
320 fib_rules_unregister(ops); 316 fib_rules_unregister(ops);
321 kfree(ops);
322 return err; 317 return err;
323} 318}
324 319
325void __net_exit fib4_rules_exit(struct net *net) 320void __net_exit fib4_rules_exit(struct net *net)
326{ 321{
327 fib_rules_unregister(net->ipv4.rules_ops); 322 fib_rules_unregister(net->ipv4.rules_ops);
328 kfree(net->ipv4.rules_ops);
329} 323}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9b096d6ff3f2..ed19aa6919c2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -228,7 +228,7 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
228 head = &fib_info_hash[hash]; 228 head = &fib_info_hash[hash];
229 229
230 hlist_for_each_entry(fi, node, head, fib_hash) { 230 hlist_for_each_entry(fi, node, head, fib_hash) {
231 if (fi->fib_net != nfi->fib_net) 231 if (!net_eq(fi->fib_net, nfi->fib_net))
232 continue; 232 continue;
233 if (fi->fib_nhs != nfi->fib_nhs) 233 if (fi->fib_nhs != nfi->fib_nhs)
234 continue; 234 continue;
@@ -1047,7 +1047,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
1047 return 0; 1047 return 0;
1048 1048
1049 hlist_for_each_entry(fi, node, head, fib_lhash) { 1049 hlist_for_each_entry(fi, node, head, fib_lhash) {
1050 if (fi->fib_net != net) 1050 if (!net_eq(fi->fib_net, net))
1051 continue; 1051 continue;
1052 if (fi->fib_prefsrc == local) { 1052 if (fi->fib_prefsrc == local) {
1053 fi->fib_flags |= RTNH_F_DEAD; 1053 fi->fib_flags |= RTNH_F_DEAD;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 84adb5754c96..fe11f60ce41b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -501,15 +501,16 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
501 if (!(rt->rt_flags & RTCF_LOCAL)) { 501 if (!(rt->rt_flags & RTCF_LOCAL)) {
502 struct net_device *dev = NULL; 502 struct net_device *dev = NULL;
503 503
504 rcu_read_lock();
504 if (rt->fl.iif && 505 if (rt->fl.iif &&
505 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) 506 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
506 dev = dev_get_by_index(net, rt->fl.iif); 507 dev = dev_get_by_index_rcu(net, rt->fl.iif);
507 508
508 if (dev) { 509 if (dev)
509 saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); 510 saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
510 dev_put(dev); 511 else
511 } else
512 saddr = 0; 512 saddr = 0;
513 rcu_read_unlock();
513 } 514 }
514 515
515 tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | 516 tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d41e5de79a82..76c08402c933 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1899,8 +1899,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1899 err = -EADDRNOTAVAIL; 1899 err = -EADDRNOTAVAIL;
1900 1900
1901 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 1901 for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
1902 if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr 1902 if ((pmc->multi.imr_multiaddr.s_addr ==
1903 && pmc->multi.imr_ifindex == imr.imr_ifindex) 1903 imr.imr_multiaddr.s_addr) &&
1904 (pmc->multi.imr_ifindex == imr.imr_ifindex))
1904 break; 1905 break;
1905 } 1906 }
1906 if (!pmc) { /* must have a prior join */ 1907 if (!pmc) { /* must have a prior join */
@@ -2311,9 +2312,10 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2311 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2312 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2312 2313
2313 state->in_dev = NULL; 2314 state->in_dev = NULL;
2314 for_each_netdev(net, state->dev) { 2315 for_each_netdev_rcu(net, state->dev) {
2315 struct in_device *in_dev; 2316 struct in_device *in_dev;
2316 in_dev = in_dev_get(state->dev); 2317
2318 in_dev = __in_dev_get_rcu(state->dev);
2317 if (!in_dev) 2319 if (!in_dev)
2318 continue; 2320 continue;
2319 read_lock(&in_dev->mc_list_lock); 2321 read_lock(&in_dev->mc_list_lock);
@@ -2323,7 +2325,6 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2323 break; 2325 break;
2324 } 2326 }
2325 read_unlock(&in_dev->mc_list_lock); 2327 read_unlock(&in_dev->mc_list_lock);
2326 in_dev_put(in_dev);
2327 } 2328 }
2328 return im; 2329 return im;
2329} 2330}
@@ -2333,16 +2334,15 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
2333 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2334 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2334 im = im->next; 2335 im = im->next;
2335 while (!im) { 2336 while (!im) {
2336 if (likely(state->in_dev != NULL)) { 2337 if (likely(state->in_dev != NULL))
2337 read_unlock(&state->in_dev->mc_list_lock); 2338 read_unlock(&state->in_dev->mc_list_lock);
2338 in_dev_put(state->in_dev); 2339
2339 } 2340 state->dev = next_net_device_rcu(state->dev);
2340 state->dev = next_net_device(state->dev);
2341 if (!state->dev) { 2341 if (!state->dev) {
2342 state->in_dev = NULL; 2342 state->in_dev = NULL;
2343 break; 2343 break;
2344 } 2344 }
2345 state->in_dev = in_dev_get(state->dev); 2345 state->in_dev = __in_dev_get_rcu(state->dev);
2346 if (!state->in_dev) 2346 if (!state->in_dev)
2347 continue; 2347 continue;
2348 read_lock(&state->in_dev->mc_list_lock); 2348 read_lock(&state->in_dev->mc_list_lock);
@@ -2361,9 +2361,9 @@ static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
2361} 2361}
2362 2362
2363static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos) 2363static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
2364 __acquires(dev_base_lock) 2364 __acquires(rcu)
2365{ 2365{
2366 read_lock(&dev_base_lock); 2366 rcu_read_lock();
2367 return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2367 return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2368} 2368}
2369 2369
@@ -2379,16 +2379,15 @@ static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2379} 2379}
2380 2380
2381static void igmp_mc_seq_stop(struct seq_file *seq, void *v) 2381static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2382 __releases(dev_base_lock) 2382 __releases(rcu)
2383{ 2383{
2384 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2384 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2385 if (likely(state->in_dev != NULL)) { 2385 if (likely(state->in_dev != NULL)) {
2386 read_unlock(&state->in_dev->mc_list_lock); 2386 read_unlock(&state->in_dev->mc_list_lock);
2387 in_dev_put(state->in_dev);
2388 state->in_dev = NULL; 2387 state->in_dev = NULL;
2389 } 2388 }
2390 state->dev = NULL; 2389 state->dev = NULL;
2391 read_unlock(&dev_base_lock); 2390 rcu_read_unlock();
2392} 2391}
2393 2392
2394static int igmp_mc_seq_show(struct seq_file *seq, void *v) 2393static int igmp_mc_seq_show(struct seq_file *seq, void *v)
@@ -2462,9 +2461,9 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2462 2461
2463 state->idev = NULL; 2462 state->idev = NULL;
2464 state->im = NULL; 2463 state->im = NULL;
2465 for_each_netdev(net, state->dev) { 2464 for_each_netdev_rcu(net, state->dev) {
2466 struct in_device *idev; 2465 struct in_device *idev;
2467 idev = in_dev_get(state->dev); 2466 idev = __in_dev_get_rcu(state->dev);
2468 if (unlikely(idev == NULL)) 2467 if (unlikely(idev == NULL))
2469 continue; 2468 continue;
2470 read_lock(&idev->mc_list_lock); 2469 read_lock(&idev->mc_list_lock);
@@ -2480,7 +2479,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2480 spin_unlock_bh(&im->lock); 2479 spin_unlock_bh(&im->lock);
2481 } 2480 }
2482 read_unlock(&idev->mc_list_lock); 2481 read_unlock(&idev->mc_list_lock);
2483 in_dev_put(idev);
2484 } 2482 }
2485 return psf; 2483 return psf;
2486} 2484}
@@ -2494,16 +2492,15 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2494 spin_unlock_bh(&state->im->lock); 2492 spin_unlock_bh(&state->im->lock);
2495 state->im = state->im->next; 2493 state->im = state->im->next;
2496 while (!state->im) { 2494 while (!state->im) {
2497 if (likely(state->idev != NULL)) { 2495 if (likely(state->idev != NULL))
2498 read_unlock(&state->idev->mc_list_lock); 2496 read_unlock(&state->idev->mc_list_lock);
2499 in_dev_put(state->idev); 2497
2500 } 2498 state->dev = next_net_device_rcu(state->dev);
2501 state->dev = next_net_device(state->dev);
2502 if (!state->dev) { 2499 if (!state->dev) {
2503 state->idev = NULL; 2500 state->idev = NULL;
2504 goto out; 2501 goto out;
2505 } 2502 }
2506 state->idev = in_dev_get(state->dev); 2503 state->idev = __in_dev_get_rcu(state->dev);
2507 if (!state->idev) 2504 if (!state->idev)
2508 continue; 2505 continue;
2509 read_lock(&state->idev->mc_list_lock); 2506 read_lock(&state->idev->mc_list_lock);
@@ -2528,8 +2525,9 @@ static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
2528} 2525}
2529 2526
2530static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) 2527static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2528 __acquires(rcu)
2531{ 2529{
2532 read_lock(&dev_base_lock); 2530 rcu_read_lock();
2533 return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2531 return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2534} 2532}
2535 2533
@@ -2545,6 +2543,7 @@ static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2545} 2543}
2546 2544
2547static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) 2545static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2546 __releases(rcu)
2548{ 2547{
2549 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 2548 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2550 if (likely(state->im != NULL)) { 2549 if (likely(state->im != NULL)) {
@@ -2553,11 +2552,10 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2553 } 2552 }
2554 if (likely(state->idev != NULL)) { 2553 if (likely(state->idev != NULL)) {
2555 read_unlock(&state->idev->mc_list_lock); 2554 read_unlock(&state->idev->mc_list_lock);
2556 in_dev_put(state->idev);
2557 state->idev = NULL; 2555 state->idev = NULL;
2558 } 2556 }
2559 state->dev = NULL; 2557 state->dev = NULL;
2560 read_unlock(&dev_base_lock); 2558 rcu_read_unlock();
2561} 2559}
2562 2560
2563static int igmp_mcf_seq_show(struct seq_file *seq, void *v) 2561static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 26fb50e91311..ee16475f8fc3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -112,7 +112,7 @@ again:
112 hashinfo->bhash_size)]; 112 hashinfo->bhash_size)];
113 spin_lock(&head->lock); 113 spin_lock(&head->lock);
114 inet_bind_bucket_for_each(tb, node, &head->chain) 114 inet_bind_bucket_for_each(tb, node, &head->chain)
115 if (ib_net(tb) == net && tb->port == rover) { 115 if (net_eq(ib_net(tb), net) && tb->port == rover) {
116 if (tb->fastreuse > 0 && 116 if (tb->fastreuse > 0 &&
117 sk->sk_reuse && 117 sk->sk_reuse &&
118 sk->sk_state != TCP_LISTEN && 118 sk->sk_state != TCP_LISTEN &&
@@ -158,7 +158,7 @@ have_snum:
158 hashinfo->bhash_size)]; 158 hashinfo->bhash_size)];
159 spin_lock(&head->lock); 159 spin_lock(&head->lock);
160 inet_bind_bucket_for_each(tb, node, &head->chain) 160 inet_bind_bucket_for_each(tb, node, &head->chain)
161 if (ib_net(tb) == net && tb->port == snum) 161 if (net_eq(ib_net(tb), net) && tb->port == snum)
162 goto tb_found; 162 goto tb_found;
163 } 163 }
164 tb = NULL; 164 tb = NULL;
@@ -531,7 +531,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
531 &expire, &resend); 531 &expire, &resend);
532 if (!expire && 532 if (!expire &&
533 (!resend || 533 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req) || 534 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
535 inet_rsk(req)->acked)) { 535 inet_rsk(req)->acked)) {
536 unsigned long timeo; 536 unsigned long timeo;
537 537
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 47ad7aab51e3..94ef51aa5bc9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -454,7 +454,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
454 * unique enough. 454 * unique enough.
455 */ 455 */
456 inet_bind_bucket_for_each(tb, node, &head->chain) { 456 inet_bind_bucket_for_each(tb, node, &head->chain) {
457 if (ib_net(tb) == net && tb->port == port) { 457 if (net_eq(ib_net(tb), net) &&
458 tb->port == port) {
458 if (tb->fastreuse >= 0) 459 if (tb->fastreuse >= 0)
459 goto next_port; 460 goto next_port;
460 WARN_ON(hlist_empty(&tb->owners)); 461 WARN_ON(hlist_empty(&tb->owners));
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 6a667dae315e..47038cb6c138 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -64,15 +64,15 @@ static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
64 if (iph->ihl != IPH_LEN_WO_OPTIONS) 64 if (iph->ihl != IPH_LEN_WO_OPTIONS)
65 return -1; 65 return -1;
66 66
67 if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack 67 if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ||
68 || tcph->rst || tcph->syn || tcph->fin) 68 tcph->rst || tcph->syn || tcph->fin)
69 return -1; 69 return -1;
70 70
71 if (INET_ECN_is_ce(ipv4_get_dsfield(iph))) 71 if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
72 return -1; 72 return -1;
73 73
74 if (tcph->doff != TCPH_LEN_WO_OPTIONS 74 if (tcph->doff != TCPH_LEN_WO_OPTIONS &&
75 && tcph->doff != TCPH_LEN_W_TIMESTAMP) 75 tcph->doff != TCPH_LEN_W_TIMESTAMP)
76 return -1; 76 return -1;
77 77
78 /* check tcp options (only timestamp allowed) */ 78 /* check tcp options (only timestamp allowed) */
@@ -262,10 +262,10 @@ static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
262 struct iphdr *iph, 262 struct iphdr *iph,
263 struct tcphdr *tcph) 263 struct tcphdr *tcph)
264{ 264{
265 if ((lro_desc->iph->saddr != iph->saddr) 265 if ((lro_desc->iph->saddr != iph->saddr) ||
266 || (lro_desc->iph->daddr != iph->daddr) 266 (lro_desc->iph->daddr != iph->daddr) ||
267 || (lro_desc->tcph->source != tcph->source) 267 (lro_desc->tcph->source != tcph->source) ||
268 || (lro_desc->tcph->dest != tcph->dest)) 268 (lro_desc->tcph->dest != tcph->dest))
269 return -1; 269 return -1;
270 return 0; 270 return 0;
271} 271}
@@ -339,9 +339,9 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
339 u64 flags; 339 u64 flags;
340 int vlan_hdr_len = 0; 340 int vlan_hdr_len = 0;
341 341
342 if (!lro_mgr->get_skb_header 342 if (!lro_mgr->get_skb_header ||
343 || lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph, 343 lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
344 &flags, priv)) 344 &flags, priv))
345 goto out; 345 goto out;
346 346
347 if (!(flags & LRO_IPV4) || !(flags & LRO_TCP)) 347 if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
@@ -351,8 +351,8 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
351 if (!lro_desc) 351 if (!lro_desc)
352 goto out; 352 goto out;
353 353
354 if ((skb->protocol == htons(ETH_P_8021Q)) 354 if ((skb->protocol == htons(ETH_P_8021Q)) &&
355 && !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID)) 355 !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
356 vlan_hdr_len = VLAN_HLEN; 356 vlan_hdr_len = VLAN_HLEN;
357 357
358 if (!lro_desc->active) { /* start new lro session */ 358 if (!lro_desc->active) { /* start new lro session */
@@ -446,9 +446,9 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
446 int hdr_len = LRO_MAX_PG_HLEN; 446 int hdr_len = LRO_MAX_PG_HLEN;
447 int vlan_hdr_len = 0; 447 int vlan_hdr_len = 0;
448 448
449 if (!lro_mgr->get_frag_header 449 if (!lro_mgr->get_frag_header ||
450 || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph, 450 lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
451 (void *)&tcph, &flags, priv)) { 451 (void *)&tcph, &flags, priv)) {
452 mac_hdr = page_address(frags->page) + frags->page_offset; 452 mac_hdr = page_address(frags->page) + frags->page_offset;
453 goto out1; 453 goto out1;
454 } 454 }
@@ -472,8 +472,8 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
472 if (!skb) 472 if (!skb)
473 goto out; 473 goto out;
474 474
475 if ((skb->protocol == htons(ETH_P_8021Q)) 475 if ((skb->protocol == htons(ETH_P_8021Q)) &&
476 && !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID)) 476 !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
477 vlan_hdr_len = VLAN_HLEN; 477 vlan_hdr_len = VLAN_HLEN;
478 478
479 iph = (void *)(skb->data + vlan_hdr_len); 479 iph = (void *)(skb->data + vlan_hdr_len);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1f5d508bb18b..31f931ef3daf 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -421,37 +421,46 @@ out:
421 421
422EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); 422EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
423 423
424void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, 424void inet_twsk_purge(struct inet_hashinfo *hashinfo,
425 struct inet_timewait_death_row *twdr, int family) 425 struct inet_timewait_death_row *twdr, int family)
426{ 426{
427 struct inet_timewait_sock *tw; 427 struct inet_timewait_sock *tw;
428 struct sock *sk; 428 struct sock *sk;
429 struct hlist_nulls_node *node; 429 struct hlist_nulls_node *node;
430 int h; 430 unsigned int slot;
431 431
432 local_bh_disable(); 432 for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
433 for (h = 0; h <= hashinfo->ehash_mask; h++) { 433 struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
434 struct inet_ehash_bucket *head = 434restart_rcu:
435 inet_ehash_bucket(hashinfo, h); 435 rcu_read_lock();
436 spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
437restart: 436restart:
438 spin_lock(lock); 437 sk_nulls_for_each_rcu(sk, node, &head->twchain) {
439 sk_nulls_for_each(sk, node, &head->twchain) {
440
441 tw = inet_twsk(sk); 438 tw = inet_twsk(sk);
442 if (!net_eq(twsk_net(tw), net) || 439 if ((tw->tw_family != family) ||
443 tw->tw_family != family) 440 atomic_read(&twsk_net(tw)->count))
441 continue;
442
443 if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
444 continue; 444 continue;
445 445
446 atomic_inc(&tw->tw_refcnt); 446 if (unlikely((tw->tw_family != family) ||
447 spin_unlock(lock); 447 atomic_read(&twsk_net(tw)->count))) {
448 inet_twsk_put(tw);
449 goto restart;
450 }
451
452 rcu_read_unlock();
448 inet_twsk_deschedule(tw, twdr); 453 inet_twsk_deschedule(tw, twdr);
449 inet_twsk_put(tw); 454 inet_twsk_put(tw);
450 455 goto restart_rcu;
451 goto restart;
452 } 456 }
453 spin_unlock(lock); 457 /* If the nulls value we got at the end of this lookup is
458 * not the expected one, we must restart lookup.
459 * We probably met an item that was moved to another chain.
460 */
461 if (get_nulls_value(node) != slot)
462 goto restart;
463 rcu_read_unlock();
454 } 464 }
455 local_bh_enable();
456} 465}
457EXPORT_SYMBOL_GPL(inet_twsk_purge); 466EXPORT_SYMBOL_GPL(inet_twsk_purge);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index b1fbe18feb5a..6bcfe52a9c87 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -67,9 +67,6 @@
67 * ip_id_count: idlock 67 * ip_id_count: idlock
68 */ 68 */
69 69
70/* Exported for inet_getid inline function. */
71DEFINE_SPINLOCK(inet_peer_idlock);
72
73static struct kmem_cache *peer_cachep __read_mostly; 70static struct kmem_cache *peer_cachep __read_mostly;
74 71
75#define node_height(x) x->avl_height 72#define node_height(x) x->avl_height
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
390 n->v4daddr = daddr; 387 n->v4daddr = daddr;
391 atomic_set(&n->refcnt, 1); 388 atomic_set(&n->refcnt, 1);
392 atomic_set(&n->rid, 0); 389 atomic_set(&n->rid, 0);
393 n->ip_id_count = secure_ip_id(daddr); 390 atomic_set(&n->ip_id_count, secure_ip_id(daddr));
394 n->tcp_ts_stamp = 0; 391 n->tcp_ts_stamp = 0;
395 392
396 write_lock_bh(&peer_pool_lock); 393 write_lock_bh(&peer_pool_lock);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd51ccd..c4735310a923 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -206,10 +206,11 @@ static void ip_expire(unsigned long arg)
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 207
208 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
209 if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) { 209 rcu_read_lock();
210 head->dev = dev_get_by_index_rcu(net, qp->iif);
211 if (head->dev)
210 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
211 dev_put(head->dev); 213 rcu_read_unlock();
212 }
213 } 214 }
214out: 215out:
215 spin_unlock(&qp->q.lock); 216 spin_unlock(&qp->q.lock);
@@ -563,7 +564,7 @@ out_oversize:
563 printk(KERN_INFO "Oversized IP packet from %pI4.\n", 564 printk(KERN_INFO "Oversized IP packet from %pI4.\n",
564 &qp->saddr); 565 &qp->saddr);
565out_fail: 566out_fail:
566 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); 567 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
567 return err; 568 return err;
568} 569}
569 570
@@ -657,7 +658,7 @@ static int ip4_frags_ns_ctl_register(struct net *net)
657 struct ctl_table_header *hdr; 658 struct ctl_table_header *hdr;
658 659
659 table = ip4_frags_ns_ctl_table; 660 table = ip4_frags_ns_ctl_table;
660 if (net != &init_net) { 661 if (!net_eq(net, &init_net)) {
661 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); 662 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
662 if (table == NULL) 663 if (table == NULL)
663 goto err_alloc; 664 goto err_alloc;
@@ -675,7 +676,7 @@ static int ip4_frags_ns_ctl_register(struct net *net)
675 return 0; 676 return 0;
676 677
677err_reg: 678err_reg:
678 if (net != &init_net) 679 if (!net_eq(net, &init_net))
679 kfree(table); 680 kfree(table);
680err_alloc: 681err_alloc:
681 return -ENOMEM; 682 return -ENOMEM;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a77807d449e3..f36ce156cac6 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
125 125
126#define HASH_SIZE 16 126#define HASH_SIZE 16
127 127
128static int ipgre_net_id; 128static int ipgre_net_id __read_mostly;
129struct ipgre_net { 129struct ipgre_net {
130 struct ip_tunnel *tunnels[4][HASH_SIZE]; 130 struct ip_tunnel *tunnels[4][HASH_SIZE];
131 131
@@ -1309,17 +1309,8 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1309 1309
1310static int ipgre_init_net(struct net *net) 1310static int ipgre_init_net(struct net *net)
1311{ 1311{
1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1312 int err; 1313 int err;
1313 struct ipgre_net *ign;
1314
1315 err = -ENOMEM;
1316 ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
1317 if (ign == NULL)
1318 goto err_alloc;
1319
1320 err = net_assign_generic(net, ipgre_net_id, ign);
1321 if (err < 0)
1322 goto err_assign;
1323 1314
1324 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0", 1315 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1325 ipgre_tunnel_setup); 1316 ipgre_tunnel_setup);
@@ -1340,10 +1331,6 @@ static int ipgre_init_net(struct net *net)
1340err_reg_dev: 1331err_reg_dev:
1341 free_netdev(ign->fb_tunnel_dev); 1332 free_netdev(ign->fb_tunnel_dev);
1342err_alloc_dev: 1333err_alloc_dev:
1343 /* nothing */
1344err_assign:
1345 kfree(ign);
1346err_alloc:
1347 return err; 1334 return err;
1348} 1335}
1349 1336
@@ -1357,12 +1344,13 @@ static void ipgre_exit_net(struct net *net)
1357 ipgre_destroy_tunnels(ign, &list); 1344 ipgre_destroy_tunnels(ign, &list);
1358 unregister_netdevice_many(&list); 1345 unregister_netdevice_many(&list);
1359 rtnl_unlock(); 1346 rtnl_unlock();
1360 kfree(ign);
1361} 1347}
1362 1348
1363static struct pernet_operations ipgre_net_ops = { 1349static struct pernet_operations ipgre_net_ops = {
1364 .init = ipgre_init_net, 1350 .init = ipgre_init_net,
1365 .exit = ipgre_exit_net, 1351 .exit = ipgre_exit_net,
1352 .id = &ipgre_net_id,
1353 .size = sizeof(struct ipgre_net),
1366}; 1354};
1367 1355
1368static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) 1356static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1476,14 +1464,14 @@ static void ipgre_tap_setup(struct net_device *dev)
1476 1464
1477 ether_setup(dev); 1465 ether_setup(dev);
1478 1466
1479 dev->netdev_ops = &ipgre_netdev_ops; 1467 dev->netdev_ops = &ipgre_tap_netdev_ops;
1480 dev->destructor = free_netdev; 1468 dev->destructor = free_netdev;
1481 1469
1482 dev->iflink = 0; 1470 dev->iflink = 0;
1483 dev->features |= NETIF_F_NETNS_LOCAL; 1471 dev->features |= NETIF_F_NETNS_LOCAL;
1484} 1472}
1485 1473
1486static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[], 1474static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1487 struct nlattr *data[]) 1475 struct nlattr *data[])
1488{ 1476{
1489 struct ip_tunnel *nt; 1477 struct ip_tunnel *nt;
@@ -1537,25 +1525,29 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1537 if (t->dev != dev) 1525 if (t->dev != dev)
1538 return -EEXIST; 1526 return -EEXIST;
1539 } else { 1527 } else {
1540 unsigned nflags = 0;
1541
1542 t = nt; 1528 t = nt;
1543 1529
1544 if (ipv4_is_multicast(p.iph.daddr)) 1530 if (dev->type != ARPHRD_ETHER) {
1545 nflags = IFF_BROADCAST; 1531 unsigned nflags = 0;
1546 else if (p.iph.daddr)
1547 nflags = IFF_POINTOPOINT;
1548 1532
1549 if ((dev->flags ^ nflags) & 1533 if (ipv4_is_multicast(p.iph.daddr))
1550 (IFF_POINTOPOINT | IFF_BROADCAST)) 1534 nflags = IFF_BROADCAST;
1551 return -EINVAL; 1535 else if (p.iph.daddr)
1536 nflags = IFF_POINTOPOINT;
1537
1538 if ((dev->flags ^ nflags) &
1539 (IFF_POINTOPOINT | IFF_BROADCAST))
1540 return -EINVAL;
1541 }
1552 1542
1553 ipgre_tunnel_unlink(ign, t); 1543 ipgre_tunnel_unlink(ign, t);
1554 t->parms.iph.saddr = p.iph.saddr; 1544 t->parms.iph.saddr = p.iph.saddr;
1555 t->parms.iph.daddr = p.iph.daddr; 1545 t->parms.iph.daddr = p.iph.daddr;
1556 t->parms.i_key = p.i_key; 1546 t->parms.i_key = p.i_key;
1557 memcpy(dev->dev_addr, &p.iph.saddr, 4); 1547 if (dev->type != ARPHRD_ETHER) {
1558 memcpy(dev->broadcast, &p.iph.daddr, 4); 1548 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1549 memcpy(dev->broadcast, &p.iph.daddr, 4);
1550 }
1559 ipgre_tunnel_link(ign, t); 1551 ipgre_tunnel_link(ign, t);
1560 netdev_state_change(dev); 1552 netdev_state_change(dev);
1561 } 1553 }
@@ -1678,7 +1670,7 @@ static int __init ipgre_init(void)
1678 return -EAGAIN; 1670 return -EAGAIN;
1679 } 1671 }
1680 1672
1681 err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops); 1673 err = register_pernet_device(&ipgre_net_ops);
1682 if (err < 0) 1674 if (err < 0)
1683 goto gen_device_failed; 1675 goto gen_device_failed;
1684 1676
@@ -1696,7 +1688,7 @@ out:
1696tap_ops_failed: 1688tap_ops_failed:
1697 rtnl_link_unregister(&ipgre_link_ops); 1689 rtnl_link_unregister(&ipgre_link_ops);
1698rtnl_link_failed: 1690rtnl_link_failed:
1699 unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops); 1691 unregister_pernet_device(&ipgre_net_ops);
1700gen_device_failed: 1692gen_device_failed:
1701 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1693 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1702 goto out; 1694 goto out;
@@ -1706,7 +1698,7 @@ static void __exit ipgre_fini(void)
1706{ 1698{
1707 rtnl_link_unregister(&ipgre_tap_ops); 1699 rtnl_link_unregister(&ipgre_tap_ops);
1708 rtnl_link_unregister(&ipgre_link_ops); 1700 rtnl_link_unregister(&ipgre_link_ops);
1709 unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops); 1701 unregister_pernet_device(&ipgre_net_ops);
1710 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1711 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1703 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1712} 1704}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fdf51badc8e5..c29de9879fda 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -164,7 +164,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
164 if (sk && inet_sk(sk)->inet_num == protocol && 164 if (sk && inet_sk(sk)->inet_num == protocol &&
165 (!sk->sk_bound_dev_if || 165 (!sk->sk_bound_dev_if ||
166 sk->sk_bound_dev_if == dev->ifindex) && 166 sk->sk_bound_dev_if == dev->ifindex) &&
167 sock_net(sk) == dev_net(dev)) { 167 net_eq(sock_net(sk), dev_net(dev))) {
168 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 168 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { 169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
170 read_unlock(&ip_ra_lock); 170 read_unlock(&ip_ra_lock);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 322b40864ac0..e34013a78ef4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -264,9 +264,11 @@ int ip_mc_output(struct sk_buff *skb)
264 264
265 This check is duplicated in ip_mr_input at the moment. 265 This check is duplicated in ip_mr_input at the moment.
266 */ 266 */
267 && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED)) 267 &&
268 ((rt->rt_flags & RTCF_LOCAL) ||
269 !(IPCB(skb)->flags & IPSKB_FORWARDED))
268#endif 270#endif
269 ) { 271 ) {
270 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 272 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
271 if (newskb) 273 if (newskb)
272 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, 274 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
@@ -501,8 +503,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
501 if (skb->sk) { 503 if (skb->sk) {
502 frag->sk = skb->sk; 504 frag->sk = skb->sk;
503 frag->destructor = sock_wfree; 505 frag->destructor = sock_wfree;
504 truesizes += frag->truesize;
505 } 506 }
507 truesizes += frag->truesize;
506 } 508 }
507 509
508 /* Everything is OK. Generate! */ 510 /* Everything is OK. Generate! */
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f8d04c256454..4e08b7f2331c 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1172,10 +1172,9 @@ static int __init ic_dynamic(void)
1172 schedule_timeout_uninterruptible(1); 1172 schedule_timeout_uninterruptible(1);
1173#ifdef IPCONFIG_DHCP 1173#ifdef IPCONFIG_DHCP
1174 /* DHCP isn't done until we get a DHCPACK. */ 1174 /* DHCP isn't done until we get a DHCPACK. */
1175 if ((ic_got_reply & IC_BOOTP) 1175 if ((ic_got_reply & IC_BOOTP) &&
1176 && (ic_proto_enabled & IC_USE_DHCP) 1176 (ic_proto_enabled & IC_USE_DHCP) &&
1177 && ic_dhcp_msgtype != DHCPACK) 1177 ic_dhcp_msgtype != DHCPACK) {
1178 {
1179 ic_got_reply = 0; 1178 ic_got_reply = 0;
1180 printk(","); 1179 printk(",");
1181 continue; 1180 continue;
@@ -1344,9 +1343,9 @@ static int __init ip_auto_config(void)
1344 */ 1343 */
1345 if (ic_myaddr == NONE || 1344 if (ic_myaddr == NONE ||
1346#ifdef CONFIG_ROOT_NFS 1345#ifdef CONFIG_ROOT_NFS
1347 (root_server_addr == NONE 1346 (root_server_addr == NONE &&
1348 && ic_servaddr == NONE 1347 ic_servaddr == NONE &&
1349 && ROOT_DEV == Root_NFS) || 1348 ROOT_DEV == Root_NFS) ||
1350#endif 1349#endif
1351 ic_first_dev->next) { 1350 ic_first_dev->next) {
1352#ifdef IPCONFIG_DYNAMIC 1351#ifdef IPCONFIG_DYNAMIC
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index a2ca53da4372..eda04fed3379 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -119,7 +119,7 @@
119#define HASH_SIZE 16 119#define HASH_SIZE 16
120#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 120#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
121 121
122static int ipip_net_id; 122static int ipip_net_id __read_mostly;
123struct ipip_net { 123struct ipip_net {
124 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 124 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
125 struct ip_tunnel *tunnels_r[HASH_SIZE]; 125 struct ip_tunnel *tunnels_r[HASH_SIZE];
@@ -446,25 +446,27 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
446 goto tx_error; 446 goto tx_error;
447 } 447 }
448 448
449 if (tiph->frag_off) 449 df |= old_iph->frag_off & htons(IP_DF);
450
451 if (df) {
450 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 452 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
451 else
452 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
453 453
454 if (mtu < 68) { 454 if (mtu < 68) {
455 stats->collisions++; 455 stats->collisions++;
456 ip_rt_put(rt); 456 ip_rt_put(rt);
457 goto tx_error; 457 goto tx_error;
458 } 458 }
459 if (skb_dst(skb))
460 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
461 459
462 df |= (old_iph->frag_off&htons(IP_DF)); 460 if (skb_dst(skb))
461 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
463 462
464 if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) { 463 if ((old_iph->frag_off & htons(IP_DF)) &&
465 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 464 mtu < ntohs(old_iph->tot_len)) {
466 ip_rt_put(rt); 465 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
467 goto tx_error; 466 htonl(mtu));
467 ip_rt_put(rt);
468 goto tx_error;
469 }
468 } 470 }
469 471
470 if (tunnel->err_count > 0) { 472 if (tunnel->err_count > 0) {
@@ -773,17 +775,8 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
773 775
774static int ipip_init_net(struct net *net) 776static int ipip_init_net(struct net *net)
775{ 777{
778 struct ipip_net *ipn = net_generic(net, ipip_net_id);
776 int err; 779 int err;
777 struct ipip_net *ipn;
778
779 err = -ENOMEM;
780 ipn = kzalloc(sizeof(struct ipip_net), GFP_KERNEL);
781 if (ipn == NULL)
782 goto err_alloc;
783
784 err = net_assign_generic(net, ipip_net_id, ipn);
785 if (err < 0)
786 goto err_assign;
787 780
788 ipn->tunnels[0] = ipn->tunnels_wc; 781 ipn->tunnels[0] = ipn->tunnels_wc;
789 ipn->tunnels[1] = ipn->tunnels_l; 782 ipn->tunnels[1] = ipn->tunnels_l;
@@ -810,29 +803,26 @@ err_reg_dev:
810 free_netdev(ipn->fb_tunnel_dev); 803 free_netdev(ipn->fb_tunnel_dev);
811err_alloc_dev: 804err_alloc_dev:
812 /* nothing */ 805 /* nothing */
813err_assign:
814 kfree(ipn);
815err_alloc:
816 return err; 806 return err;
817} 807}
818 808
819static void ipip_exit_net(struct net *net) 809static void ipip_exit_net(struct net *net)
820{ 810{
821 struct ipip_net *ipn; 811 struct ipip_net *ipn = net_generic(net, ipip_net_id);
822 LIST_HEAD(list); 812 LIST_HEAD(list);
823 813
824 ipn = net_generic(net, ipip_net_id);
825 rtnl_lock(); 814 rtnl_lock();
826 ipip_destroy_tunnels(ipn, &list); 815 ipip_destroy_tunnels(ipn, &list);
827 unregister_netdevice_queue(ipn->fb_tunnel_dev, &list); 816 unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
828 unregister_netdevice_many(&list); 817 unregister_netdevice_many(&list);
829 rtnl_unlock(); 818 rtnl_unlock();
830 kfree(ipn);
831} 819}
832 820
833static struct pernet_operations ipip_net_ops = { 821static struct pernet_operations ipip_net_ops = {
834 .init = ipip_init_net, 822 .init = ipip_init_net,
835 .exit = ipip_exit_net, 823 .exit = ipip_exit_net,
824 .id = &ipip_net_id,
825 .size = sizeof(struct ipip_net),
836}; 826};
837 827
838static int __init ipip_init(void) 828static int __init ipip_init(void)
@@ -846,7 +836,7 @@ static int __init ipip_init(void)
846 return -EAGAIN; 836 return -EAGAIN;
847 } 837 }
848 838
849 err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops); 839 err = register_pernet_device(&ipip_net_ops);
850 if (err) 840 if (err)
851 xfrm4_tunnel_deregister(&ipip_handler, AF_INET); 841 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
852 842
@@ -858,7 +848,7 @@ static void __exit ipip_fini(void)
858 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) 848 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
859 printk(KERN_INFO "ipip close: can't deregister tunnel\n"); 849 printk(KERN_INFO "ipip close: can't deregister tunnel\n");
860 850
861 unregister_pernet_gen_device(ipip_net_id, &ipip_net_ops); 851 unregister_pernet_device(&ipip_net_ops);
862} 852}
863 853
864module_init(ipip_init); 854module_init(ipip_init);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ef4ee45b928f..54596f73eff5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -494,8 +494,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
494 return -EINVAL; 494 return -EINVAL;
495 } 495 }
496 496
497 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) 497 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
498 dev_put(dev);
498 return -EADDRNOTAVAIL; 499 return -EADDRNOTAVAIL;
500 }
499 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 501 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
500 ip_rt_multicast_event(in_dev); 502 ip_rt_multicast_event(in_dev);
501 503
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 1725dc0ef688..f53cb8df4182 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -155,10 +155,10 @@ static int nf_ip_reroute(struct sk_buff *skb,
155 if (entry->hook == NF_INET_LOCAL_OUT) { 155 if (entry->hook == NF_INET_LOCAL_OUT) {
156 const struct iphdr *iph = ip_hdr(skb); 156 const struct iphdr *iph = ip_hdr(skb);
157 157
158 if (!(iph->tos == rt_info->tos 158 if (!(iph->tos == rt_info->tos &&
159 && skb->mark == rt_info->mark 159 skb->mark == rt_info->mark &&
160 && iph->daddr == rt_info->daddr 160 iph->daddr == rt_info->daddr &&
161 && iph->saddr == rt_info->saddr)) 161 iph->saddr == rt_info->saddr))
162 return ip_route_me_harder(skb, RTN_UNSPEC); 162 return ip_route_me_harder(skb, RTN_UNSPEC);
163 } 163 }
164 return 0; 164 return 0;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 9f0787091951..49ad44712f46 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -499,7 +499,7 @@ ipq_rcv_nl_event(struct notifier_block *this,
499 499
500 if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) { 500 if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
501 write_lock_bh(&queue_lock); 501 write_lock_bh(&queue_lock);
502 if ((n->net == &init_net) && (n->pid == peer_pid)) 502 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
503 __ipq_reset(); 503 __ipq_reset();
504 write_unlock_bh(&queue_lock); 504 write_unlock_bh(&queue_lock);
505 } 505 }
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 68afc6ecd343..fe1a64479dd0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -750,6 +750,8 @@ static int __init nf_nat_init(void)
750 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 750 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
751 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, 751 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
752 nfnetlink_parse_nat_setup); 752 nfnetlink_parse_nat_setup);
753 BUG_ON(nf_ct_nat_offset != NULL);
754 rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
753 return 0; 755 return 0;
754 756
755 cleanup_extend: 757 cleanup_extend:
@@ -764,6 +766,7 @@ static void __exit nf_nat_cleanup(void)
764 nf_ct_extend_unregister(&nat_extend); 766 nf_ct_extend_unregister(&nat_extend);
765 rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL); 767 rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
766 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL); 768 rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
769 rcu_assign_pointer(nf_ct_nat_offset, NULL);
767 synchronize_net(); 770 synchronize_net();
768} 771}
769 772
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 5bf6a92cc551..7f10a6be0191 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -69,6 +69,28 @@ adjust_tcp_sequence(u32 seq,
69 DUMP_OFFSET(this_way); 69 DUMP_OFFSET(this_way);
70} 70}
71 71
72/* Get the offset value, for conntrack */
73s16 nf_nat_get_offset(const struct nf_conn *ct,
74 enum ip_conntrack_dir dir,
75 u32 seq)
76{
77 struct nf_conn_nat *nat = nfct_nat(ct);
78 struct nf_nat_seq *this_way;
79 s16 offset;
80
81 if (!nat)
82 return 0;
83
84 this_way = &nat->seq[dir];
85 spin_lock_bh(&nf_nat_seqofs_lock);
86 offset = after(seq, this_way->correction_pos)
87 ? this_way->offset_after : this_way->offset_before;
88 spin_unlock_bh(&nf_nat_seqofs_lock);
89
90 return offset;
91}
92EXPORT_SYMBOL_GPL(nf_nat_get_offset);
93
72/* Frobs data inside this packet, which is linear. */ 94/* Frobs data inside this packet, which is linear. */
73static void mangle_contents(struct sk_buff *skb, 95static void mangle_contents(struct sk_buff *skb,
74 unsigned int dataoff, 96 unsigned int dataoff,
@@ -185,11 +207,6 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
185 adjust_tcp_sequence(ntohl(tcph->seq), 207 adjust_tcp_sequence(ntohl(tcph->seq),
186 (int)rep_len - (int)match_len, 208 (int)rep_len - (int)match_len,
187 ct, ctinfo); 209 ct, ctinfo);
188 /* Tell TCP window tracking about seq change */
189 nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
190 ct, CTINFO2DIR(ctinfo),
191 (int)rep_len - (int)match_len);
192
193 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); 210 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
194 } 211 }
195 return 1; 212 return 1;
@@ -411,12 +428,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
411 tcph->seq = newseq; 428 tcph->seq = newseq;
412 tcph->ack_seq = newack; 429 tcph->ack_seq = newack;
413 430
414 if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo)) 431 return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
415 return 0;
416
417 nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
418
419 return 1;
420} 432}
421 433
422/* Setup NAT on this expected conntrack so it follows master. */ 434/* Setup NAT on this expected conntrack so it follows master. */
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9ef8c0829a77..ce154b47f1da 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -351,13 +351,24 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
351 skb->ip_summed = CHECKSUM_NONE; 351 skb->ip_summed = CHECKSUM_NONE;
352 352
353 skb->transport_header = skb->network_header; 353 skb->transport_header = skb->network_header;
354 err = memcpy_fromiovecend((void *)iph, from, 0, length); 354 err = -EFAULT;
355 if (err) 355 if (memcpy_fromiovecend((void *)iph, from, 0, length))
356 goto error_fault; 356 goto error_free;
357 357
358 /* We don't modify invalid header */
359 iphlen = iph->ihl * 4; 358 iphlen = iph->ihl * 4;
360 if (iphlen >= sizeof(*iph) && iphlen <= length) { 359
360 /*
361 * We don't want to modify the ip header, but we do need to
362 * be sure that it won't cause problems later along the network
363 * stack. Specifically we want to make sure that iph->ihl is a
364 * sane value. If ihl points beyond the length of the buffer passed
365 * in, reject the frame as invalid
366 */
367 err = -EINVAL;
368 if (iphlen > length)
369 goto error_free;
370
371 if (iphlen >= sizeof(*iph)) {
361 if (!iph->saddr) 372 if (!iph->saddr)
362 iph->saddr = rt->rt_src; 373 iph->saddr = rt->rt_src;
363 iph->check = 0; 374 iph->check = 0;
@@ -380,8 +391,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
380out: 391out:
381 return 0; 392 return 0;
382 393
383error_fault: 394error_free:
384 err = -EFAULT;
385 kfree_skb(skb); 395 kfree_skb(skb);
386error: 396error:
387 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); 397 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 68fb22702051..90cdcfc32937 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -513,43 +513,42 @@ static const struct file_operations rt_cpu_seq_fops = {
513}; 513};
514 514
515#ifdef CONFIG_NET_CLS_ROUTE 515#ifdef CONFIG_NET_CLS_ROUTE
516static int ip_rt_acct_read(char *buffer, char **start, off_t offset, 516static int rt_acct_proc_show(struct seq_file *m, void *v)
517 int length, int *eof, void *data) 517{
518{ 518 struct ip_rt_acct *dst, *src;
519 unsigned int i; 519 unsigned int i, j;
520 520
521 if ((offset & 3) || (length & 3)) 521 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
522 return -EIO; 522 if (!dst)
523 523 return -ENOMEM;
524 if (offset >= sizeof(struct ip_rt_acct) * 256) { 524
525 *eof = 1; 525 for_each_possible_cpu(i) {
526 return 0; 526 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
527 } 527 for (j = 0; j < 256; j++) {
528 528 dst[j].o_bytes += src[j].o_bytes;
529 if (offset + length >= sizeof(struct ip_rt_acct) * 256) { 529 dst[j].o_packets += src[j].o_packets;
530 length = sizeof(struct ip_rt_acct) * 256 - offset; 530 dst[j].i_bytes += src[j].i_bytes;
531 *eof = 1; 531 dst[j].i_packets += src[j].i_packets;
532 }
532 } 533 }
533 534
534 offset /= sizeof(u32); 535 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
535 536 kfree(dst);
536 if (length > 0) { 537 return 0;
537 u32 *dst = (u32 *) buffer; 538}
538
539 *start = buffer;
540 memset(dst, 0, length);
541
542 for_each_possible_cpu(i) {
543 unsigned int j;
544 u32 *src;
545 539
546 src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset; 540static int rt_acct_proc_open(struct inode *inode, struct file *file)
547 for (j = 0; j < length/4; j++) 541{
548 dst[j] += src[j]; 542 return single_open(file, rt_acct_proc_show, NULL);
549 }
550 }
551 return length;
552} 543}
544
545static const struct file_operations rt_acct_proc_fops = {
546 .owner = THIS_MODULE,
547 .open = rt_acct_proc_open,
548 .read = seq_read,
549 .llseek = seq_lseek,
550 .release = single_release,
551};
553#endif 552#endif
554 553
555static int __net_init ip_rt_do_proc_init(struct net *net) 554static int __net_init ip_rt_do_proc_init(struct net *net)
@@ -567,8 +566,7 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
567 goto err2; 566 goto err2;
568 567
569#ifdef CONFIG_NET_CLS_ROUTE 568#ifdef CONFIG_NET_CLS_ROUTE
570 pde = create_proc_read_entry("rt_acct", 0, net->proc_net, 569 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
571 ip_rt_acct_read, NULL);
572 if (!pde) 570 if (!pde)
573 goto err3; 571 goto err3;
574#endif 572#endif
@@ -703,7 +701,7 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
703 701
704static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) 702static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
705{ 703{
706 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); 704 return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
707} 705}
708 706
709static inline int rt_is_expired(struct rtable *rth) 707static inline int rt_is_expired(struct rtable *rth)
@@ -902,6 +900,12 @@ void rt_cache_flush(struct net *net, int delay)
902 rt_do_flush(!in_softirq()); 900 rt_do_flush(!in_softirq());
903} 901}
904 902
903/* Flush previous cache invalidated entries from the cache */
904void rt_cache_flush_batch(void)
905{
906 rt_do_flush(!in_softirq());
907}
908
905/* 909/*
906 * We change rt_genid and let gc do the cleanup 910 * We change rt_genid and let gc do the cleanup
907 */ 911 */
@@ -1346,9 +1350,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1346 return; 1350 return;
1347 1351
1348 net = dev_net(dev); 1352 net = dev_net(dev);
1349 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) 1353 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1350 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) 1354 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1351 || ipv4_is_zeronet(new_gw)) 1355 ipv4_is_zeronet(new_gw))
1352 goto reject_redirect; 1356 goto reject_redirect;
1353 1357
1354 if (!rt_caching(net)) 1358 if (!rt_caching(net))
@@ -1851,7 +1855,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1851 goto e_inval; 1855 goto e_inval;
1852 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 1856 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1853 } else if (fib_validate_source(saddr, 0, tos, 0, 1857 } else if (fib_validate_source(saddr, 0, tos, 0,
1854 dev, &spec_dst, &itag) < 0) 1858 dev, &spec_dst, &itag, 0) < 0)
1855 goto e_inval; 1859 goto e_inval;
1856 1860
1857 rth = dst_alloc(&ipv4_dst_ops); 1861 rth = dst_alloc(&ipv4_dst_ops);
@@ -1964,7 +1968,7 @@ static int __mkroute_input(struct sk_buff *skb,
1964 1968
1965 1969
1966 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1970 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1967 in_dev->dev, &spec_dst, &itag); 1971 in_dev->dev, &spec_dst, &itag, skb->mark);
1968 if (err < 0) { 1972 if (err < 0) {
1969 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1973 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1970 saddr); 1974 saddr);
@@ -2138,7 +2142,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2138 int result; 2142 int result;
2139 result = fib_validate_source(saddr, daddr, tos, 2143 result = fib_validate_source(saddr, daddr, tos,
2140 net->loopback_dev->ifindex, 2144 net->loopback_dev->ifindex,
2141 dev, &spec_dst, &itag); 2145 dev, &spec_dst, &itag, skb->mark);
2142 if (result < 0) 2146 if (result < 0)
2143 goto martian_source; 2147 goto martian_source;
2144 if (result) 2148 if (result)
@@ -2167,7 +2171,7 @@ brd_input:
2167 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2171 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2168 else { 2172 else {
2169 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2173 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2170 &itag); 2174 &itag, skb->mark);
2171 if (err < 0) 2175 if (err < 0)
2172 goto martian_source; 2176 goto martian_source;
2173 if (err) 2177 if (err)
@@ -2311,10 +2315,11 @@ skip_cache:
2311 ip_hdr(skb)->protocol); 2315 ip_hdr(skb)->protocol);
2312 if (our 2316 if (our
2313#ifdef CONFIG_IP_MROUTE 2317#ifdef CONFIG_IP_MROUTE
2314 || (!ipv4_is_local_multicast(daddr) && 2318 ||
2315 IN_DEV_MFORWARD(in_dev)) 2319 (!ipv4_is_local_multicast(daddr) &&
2320 IN_DEV_MFORWARD(in_dev))
2316#endif 2321#endif
2317 ) { 2322 ) {
2318 rcu_read_unlock(); 2323 rcu_read_unlock();
2319 return ip_route_input_mc(skb, daddr, saddr, 2324 return ip_route_input_mc(skb, daddr, saddr,
2320 tos, dev, our); 2325 tos, dev, our);
@@ -2511,9 +2516,9 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2511 of another iface. --ANK 2516 of another iface. --ANK
2512 */ 2517 */
2513 2518
2514 if (oldflp->oif == 0 2519 if (oldflp->oif == 0 &&
2515 && (ipv4_is_multicast(oldflp->fl4_dst) || 2520 (ipv4_is_multicast(oldflp->fl4_dst) ||
2516 oldflp->fl4_dst == htonl(0xFFFFFFFF))) { 2521 oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2517 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2522 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2518 dev_out = ip_dev_find(net, oldflp->fl4_src); 2523 dev_out = ip_dev_find(net, oldflp->fl4_src);
2519 if (dev_out == NULL) 2524 if (dev_out == NULL)
@@ -2852,7 +2857,7 @@ static int rt_fill_info(struct net *net,
2852 error = rt->u.dst.error; 2857 error = rt->u.dst.error;
2853 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; 2858 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2854 if (rt->peer) { 2859 if (rt->peer) {
2855 id = rt->peer->ip_id_count; 2860 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2856 if (rt->peer->tcp_ts_stamp) { 2861 if (rt->peer->tcp_ts_stamp) {
2857 ts = rt->peer->tcp_ts; 2862 ts = rt->peer->tcp_ts;
2858 tsage = get_seconds() - rt->peer->tcp_ts_stamp; 2863 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
@@ -3309,7 +3314,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
3309 struct ctl_table *tbl; 3314 struct ctl_table *tbl;
3310 3315
3311 tbl = ipv4_route_flush_table; 3316 tbl = ipv4_route_flush_table;
3312 if (net != &init_net) { 3317 if (!net_eq(net, &init_net)) {
3313 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); 3318 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3314 if (tbl == NULL) 3319 if (tbl == NULL)
3315 goto err_dup; 3320 goto err_dup;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3146cc401748..26399ad2a289 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -253,6 +253,8 @@ EXPORT_SYMBOL(cookie_check_timestamp);
253struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 253struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
254 struct ip_options *opt) 254 struct ip_options *opt)
255{ 255{
256 struct tcp_options_received tcp_opt;
257 u8 *hash_location;
256 struct inet_request_sock *ireq; 258 struct inet_request_sock *ireq;
257 struct tcp_request_sock *treq; 259 struct tcp_request_sock *treq;
258 struct tcp_sock *tp = tcp_sk(sk); 260 struct tcp_sock *tp = tcp_sk(sk);
@@ -263,7 +265,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
263 int mss; 265 int mss;
264 struct rtable *rt; 266 struct rtable *rt;
265 __u8 rcv_wscale; 267 __u8 rcv_wscale;
266 struct tcp_options_received tcp_opt;
267 268
268 if (!sysctl_tcp_syncookies || !th->ack) 269 if (!sysctl_tcp_syncookies || !th->ack)
269 goto out; 270 goto out;
@@ -341,7 +342,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
341 342
342 /* check for timestamp cookie support */ 343 /* check for timestamp cookie support */
343 memset(&tcp_opt, 0, sizeof(tcp_opt)); 344 memset(&tcp_opt, 0, sizeof(tcp_opt));
344 tcp_parse_options(skb, &tcp_opt, 0, &rt->u.dst); 345 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
345 346
346 if (tcp_opt.saw_tstamp) 347 if (tcp_opt.saw_tstamp)
347 cookie_check_timestamp(&tcp_opt); 348 cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 2dcf04d9b005..13f7ab6ad6a0 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -714,6 +714,14 @@ static struct ctl_table ipv4_table[] = {
714 }, 714 },
715 { 715 {
716 .ctl_name = CTL_UNNUMBERED, 716 .ctl_name = CTL_UNNUMBERED,
717 .procname = "tcp_cookie_size",
718 .data = &sysctl_tcp_cookie_size,
719 .maxlen = sizeof(int),
720 .mode = 0644,
721 .proc_handler = proc_dointvec
722 },
723 {
724 .ctl_name = CTL_UNNUMBERED,
717 .procname = "udp_mem", 725 .procname = "udp_mem",
718 .data = &sysctl_udp_mem, 726 .data = &sysctl_udp_mem,
719 .maxlen = sizeof(sysctl_udp_mem), 727 .maxlen = sizeof(sysctl_udp_mem),
@@ -818,7 +826,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
818 struct ctl_table *table; 826 struct ctl_table *table;
819 827
820 table = ipv4_net_table; 828 table = ipv4_net_table;
821 if (net != &init_net) { 829 if (!net_eq(net, &init_net)) {
822 table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); 830 table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
823 if (table == NULL) 831 if (table == NULL)
824 goto err_alloc; 832 goto err_alloc;
@@ -849,7 +857,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
849 return 0; 857 return 0;
850 858
851err_reg: 859err_reg:
852 if (net != &init_net) 860 if (!net_eq(net, &init_net))
853 kfree(table); 861 kfree(table);
854err_alloc: 862err_alloc:
855 return -ENOMEM; 863 return -ENOMEM;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e0cfa633680a..c8666b70cde0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -264,6 +264,7 @@
264#include <linux/cache.h> 264#include <linux/cache.h>
265#include <linux/err.h> 265#include <linux/err.h>
266#include <linux/crypto.h> 266#include <linux/crypto.h>
267#include <linux/time.h>
267 268
268#include <net/icmp.h> 269#include <net/icmp.h>
269#include <net/tcp.h> 270#include <net/tcp.h>
@@ -1183,7 +1184,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1183#if TCP_DEBUG 1184#if TCP_DEBUG
1184 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1185 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1185 1186
1186 WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1187 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1188 KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1189 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1187#endif 1190#endif
1188 1191
1189 if (inet_csk_ack_scheduled(sk)) { 1192 if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1433,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1430 /* Now that we have two receive queues this 1433 /* Now that we have two receive queues this
1431 * shouldn't happen. 1434 * shouldn't happen.
1432 */ 1435 */
1433 if (before(*seq, TCP_SKB_CB(skb)->seq)) { 1436 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1434 printk(KERN_INFO "recvmsg bug: copied %X " 1437 KERN_INFO "recvmsg bug: copied %X "
1435 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); 1438 "seq %X rcvnxt %X fl %X\n", *seq,
1439 TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1440 flags))
1436 break; 1441 break;
1437 } 1442
1438 offset = *seq - TCP_SKB_CB(skb)->seq; 1443 offset = *seq - TCP_SKB_CB(skb)->seq;
1439 if (tcp_hdr(skb)->syn) 1444 if (tcp_hdr(skb)->syn)
1440 offset--; 1445 offset--;
@@ -1443,8 +1448,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1443 if (tcp_hdr(skb)->fin) 1448 if (tcp_hdr(skb)->fin)
1444 goto found_fin_ok; 1449 goto found_fin_ok;
1445 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " 1450 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
1446 "copied %X seq %X\n", *seq, 1451 "copied %X seq %X rcvnxt %X fl %X\n",
1447 TCP_SKB_CB(skb)->seq); 1452 *seq, TCP_SKB_CB(skb)->seq,
1453 tp->rcv_nxt, flags);
1448 } 1454 }
1449 1455
1450 /* Well, if we have backlog, try to process it now yet. */ 1456 /* Well, if we have backlog, try to process it now yet. */
@@ -2054,6 +2060,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2054 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2060 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2055 tp->snd_cwnd_cnt = 0; 2061 tp->snd_cwnd_cnt = 0;
2056 tp->bytes_acked = 0; 2062 tp->bytes_acked = 0;
2063 tp->window_clamp = 0;
2057 tcp_set_ca_state(sk, TCP_CA_Open); 2064 tcp_set_ca_state(sk, TCP_CA_Open);
2058 tcp_clear_retrans(tp); 2065 tcp_clear_retrans(tp);
2059 inet_csk_delack_init(sk); 2066 inet_csk_delack_init(sk);
@@ -2078,8 +2085,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2078 int val; 2085 int val;
2079 int err = 0; 2086 int err = 0;
2080 2087
2081 /* This is a string value all the others are int's */ 2088 /* These are data/string values, all the others are ints */
2082 if (optname == TCP_CONGESTION) { 2089 switch (optname) {
2090 case TCP_CONGESTION: {
2083 char name[TCP_CA_NAME_MAX]; 2091 char name[TCP_CA_NAME_MAX];
2084 2092
2085 if (optlen < 1) 2093 if (optlen < 1)
@@ -2096,6 +2104,93 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2096 release_sock(sk); 2104 release_sock(sk);
2097 return err; 2105 return err;
2098 } 2106 }
2107 case TCP_COOKIE_TRANSACTIONS: {
2108 struct tcp_cookie_transactions ctd;
2109 struct tcp_cookie_values *cvp = NULL;
2110
2111 if (sizeof(ctd) > optlen)
2112 return -EINVAL;
2113 if (copy_from_user(&ctd, optval, sizeof(ctd)))
2114 return -EFAULT;
2115
2116 if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
2117 ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
2118 return -EINVAL;
2119
2120 if (ctd.tcpct_cookie_desired == 0) {
2121 /* default to global value */
2122 } else if ((0x1 & ctd.tcpct_cookie_desired) ||
2123 ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
2124 ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
2125 return -EINVAL;
2126 }
2127
2128 if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
2129 /* Supercedes all other values */
2130 lock_sock(sk);
2131 if (tp->cookie_values != NULL) {
2132 kref_put(&tp->cookie_values->kref,
2133 tcp_cookie_values_release);
2134 tp->cookie_values = NULL;
2135 }
2136 tp->rx_opt.cookie_in_always = 0; /* false */
2137 tp->rx_opt.cookie_out_never = 1; /* true */
2138 release_sock(sk);
2139 return err;
2140 }
2141
2142 /* Allocate ancillary memory before locking.
2143 */
2144 if (ctd.tcpct_used > 0 ||
2145 (tp->cookie_values == NULL &&
2146 (sysctl_tcp_cookie_size > 0 ||
2147 ctd.tcpct_cookie_desired > 0 ||
2148 ctd.tcpct_s_data_desired > 0))) {
2149 cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
2150 GFP_KERNEL);
2151 if (cvp == NULL)
2152 return -ENOMEM;
2153 }
2154 lock_sock(sk);
2155 tp->rx_opt.cookie_in_always =
2156 (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
2157 tp->rx_opt.cookie_out_never = 0; /* false */
2158
2159 if (tp->cookie_values != NULL) {
2160 if (cvp != NULL) {
2161 /* Changed values are recorded by a changed
2162 * pointer, ensuring the cookie will differ,
2163 * without separately hashing each value later.
2164 */
2165 kref_put(&tp->cookie_values->kref,
2166 tcp_cookie_values_release);
2167 kref_init(&cvp->kref);
2168 tp->cookie_values = cvp;
2169 } else {
2170 cvp = tp->cookie_values;
2171 }
2172 }
2173 if (cvp != NULL) {
2174 cvp->cookie_desired = ctd.tcpct_cookie_desired;
2175
2176 if (ctd.tcpct_used > 0) {
2177 memcpy(cvp->s_data_payload, ctd.tcpct_value,
2178 ctd.tcpct_used);
2179 cvp->s_data_desired = ctd.tcpct_used;
2180 cvp->s_data_constant = 1; /* true */
2181 } else {
2182 /* No constant payload data. */
2183 cvp->s_data_desired = ctd.tcpct_s_data_desired;
2184 cvp->s_data_constant = 0; /* false */
2185 }
2186 }
2187 release_sock(sk);
2188 return err;
2189 }
2190 default:
2191 /* fallthru */
2192 break;
2193 };
2099 2194
2100 if (optlen < sizeof(int)) 2195 if (optlen < sizeof(int))
2101 return -EINVAL; 2196 return -EINVAL;
@@ -2420,6 +2515,47 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2420 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 2515 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2421 return -EFAULT; 2516 return -EFAULT;
2422 return 0; 2517 return 0;
2518
2519 case TCP_COOKIE_TRANSACTIONS: {
2520 struct tcp_cookie_transactions ctd;
2521 struct tcp_cookie_values *cvp = tp->cookie_values;
2522
2523 if (get_user(len, optlen))
2524 return -EFAULT;
2525 if (len < sizeof(ctd))
2526 return -EINVAL;
2527
2528 memset(&ctd, 0, sizeof(ctd));
2529 ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
2530 TCP_COOKIE_IN_ALWAYS : 0)
2531 | (tp->rx_opt.cookie_out_never ?
2532 TCP_COOKIE_OUT_NEVER : 0);
2533
2534 if (cvp != NULL) {
2535 ctd.tcpct_flags |= (cvp->s_data_in ?
2536 TCP_S_DATA_IN : 0)
2537 | (cvp->s_data_out ?
2538 TCP_S_DATA_OUT : 0);
2539
2540 ctd.tcpct_cookie_desired = cvp->cookie_desired;
2541 ctd.tcpct_s_data_desired = cvp->s_data_desired;
2542
2543 /* Cookie(s) saved, return as nonce */
2544 if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
2545 /* impossible? */
2546 return -EINVAL;
2547 }
2548 memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2549 cvp->cookie_pair_size);
2550 ctd.tcpct_used = cvp->cookie_pair_size;
2551 }
2552
2553 if (put_user(sizeof(ctd), optlen))
2554 return -EFAULT;
2555 if (copy_to_user(optval, &ctd, sizeof(ctd)))
2556 return -EFAULT;
2557 return 0;
2558 }
2423 default: 2559 default:
2424 return -ENOPROTOOPT; 2560 return -ENOPROTOOPT;
2425 } 2561 }
@@ -2842,6 +2978,135 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
2842 2978
2843#endif 2979#endif
2844 2980
2981/**
2982 * Each Responder maintains up to two secret values concurrently for
2983 * efficient secret rollover. Each secret value has 4 states:
2984 *
2985 * Generating. (tcp_secret_generating != tcp_secret_primary)
2986 * Generates new Responder-Cookies, but not yet used for primary
2987 * verification. This is a short-term state, typically lasting only
2988 * one round trip time (RTT).
2989 *
2990 * Primary. (tcp_secret_generating == tcp_secret_primary)
2991 * Used both for generation and primary verification.
2992 *
2993 * Retiring. (tcp_secret_retiring != tcp_secret_secondary)
2994 * Used for verification, until the first failure that can be
2995 * verified by the newer Generating secret. At that time, this
2996 * cookie's state is changed to Secondary, and the Generating
2997 * cookie's state is changed to Primary. This is a short-term state,
2998 * typically lasting only one round trip time (RTT).
2999 *
3000 * Secondary. (tcp_secret_retiring == tcp_secret_secondary)
3001 * Used for secondary verification, after primary verification
3002 * failures. This state lasts no more than twice the Maximum Segment
3003 * Lifetime (2MSL). Then, the secret is discarded.
3004 */
3005struct tcp_cookie_secret {
3006 /* The secret is divided into two parts. The digest part is the
3007 * equivalent of previously hashing a secret and saving the state,
3008 * and serves as an initialization vector (IV). The message part
3009 * serves as the trailing secret.
3010 */
3011 u32 secrets[COOKIE_WORKSPACE_WORDS];
3012 unsigned long expires;
3013};
3014
3015#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
3016#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
3017#define TCP_SECRET_LIFE (HZ * 600)
3018
3019static struct tcp_cookie_secret tcp_secret_one;
3020static struct tcp_cookie_secret tcp_secret_two;
3021
3022/* Essentially a circular list, without dynamic allocation. */
3023static struct tcp_cookie_secret *tcp_secret_generating;
3024static struct tcp_cookie_secret *tcp_secret_primary;
3025static struct tcp_cookie_secret *tcp_secret_retiring;
3026static struct tcp_cookie_secret *tcp_secret_secondary;
3027
3028static DEFINE_SPINLOCK(tcp_secret_locker);
3029
3030/* Select a pseudo-random word in the cookie workspace.
3031 */
3032static inline u32 tcp_cookie_work(const u32 *ws, const int n)
3033{
3034 return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
3035}
3036
3037/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
3038 * Called in softirq context.
3039 * Returns: 0 for success.
3040 */
3041int tcp_cookie_generator(u32 *bakery)
3042{
3043 unsigned long jiffy = jiffies;
3044
3045 if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
3046 spin_lock_bh(&tcp_secret_locker);
3047 if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
3048 /* refreshed by another */
3049 memcpy(bakery,
3050 &tcp_secret_generating->secrets[0],
3051 COOKIE_WORKSPACE_WORDS);
3052 } else {
3053 /* still needs refreshing */
3054 get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
3055
3056 /* The first time, paranoia assumes that the
3057 * randomization function isn't as strong. But,
3058 * this secret initialization is delayed until
3059 * the last possible moment (packet arrival).
3060 * Although that time is observable, it is
3061 * unpredictably variable. Mash in the most
3062 * volatile clock bits available, and expire the
3063 * secret extra quickly.
3064 */
3065 if (unlikely(tcp_secret_primary->expires ==
3066 tcp_secret_secondary->expires)) {
3067 struct timespec tv;
3068
3069 getnstimeofday(&tv);
3070 bakery[COOKIE_DIGEST_WORDS+0] ^=
3071 (u32)tv.tv_nsec;
3072
3073 tcp_secret_secondary->expires = jiffy
3074 + TCP_SECRET_1MSL
3075 + (0x0f & tcp_cookie_work(bakery, 0));
3076 } else {
3077 tcp_secret_secondary->expires = jiffy
3078 + TCP_SECRET_LIFE
3079 + (0xff & tcp_cookie_work(bakery, 1));
3080 tcp_secret_primary->expires = jiffy
3081 + TCP_SECRET_2MSL
3082 + (0x1f & tcp_cookie_work(bakery, 2));
3083 }
3084 memcpy(&tcp_secret_secondary->secrets[0],
3085 bakery, COOKIE_WORKSPACE_WORDS);
3086
3087 rcu_assign_pointer(tcp_secret_generating,
3088 tcp_secret_secondary);
3089 rcu_assign_pointer(tcp_secret_retiring,
3090 tcp_secret_primary);
3091 /*
3092 * Neither call_rcu() nor synchronize_rcu() needed.
3093 * Retiring data is not freed. It is replaced after
3094 * further (locked) pointer updates, and a quiet time
3095 * (minimum 1MSL, maximum LIFE - 2MSL).
3096 */
3097 }
3098 spin_unlock_bh(&tcp_secret_locker);
3099 } else {
3100 rcu_read_lock_bh();
3101 memcpy(bakery,
3102 &rcu_dereference(tcp_secret_generating)->secrets[0],
3103 COOKIE_WORKSPACE_WORDS);
3104 rcu_read_unlock_bh();
3105 }
3106 return 0;
3107}
3108EXPORT_SYMBOL(tcp_cookie_generator);
3109
2845void tcp_done(struct sock *sk) 3110void tcp_done(struct sock *sk)
2846{ 3111{
2847 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3112 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
@@ -2876,6 +3141,7 @@ void __init tcp_init(void)
2876 struct sk_buff *skb = NULL; 3141 struct sk_buff *skb = NULL;
2877 unsigned long nr_pages, limit; 3142 unsigned long nr_pages, limit;
2878 int order, i, max_share; 3143 int order, i, max_share;
3144 unsigned long jiffy = jiffies;
2879 3145
2880 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3146 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
2881 3147
@@ -2969,6 +3235,15 @@ void __init tcp_init(void)
2969 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3235 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
2970 3236
2971 tcp_register_congestion_control(&tcp_reno); 3237 tcp_register_congestion_control(&tcp_reno);
3238
3239 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
3240 memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
3241 tcp_secret_one.expires = jiffy; /* past due */
3242 tcp_secret_two.expires = jiffy; /* past due */
3243 tcp_secret_generating = &tcp_secret_one;
3244 tcp_secret_primary = &tcp_secret_one;
3245 tcp_secret_retiring = &tcp_secret_two;
3246 tcp_secret_secondary = &tcp_secret_two;
2972} 3247}
2973 3248
2974EXPORT_SYMBOL(tcp_close); 3249EXPORT_SYMBOL(tcp_close);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 26d5c7fc7de5..7c94a4955416 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -92,8 +92,8 @@ static inline void measure_rtt(struct sock *sk, u32 srtt)
92 if (icsk->icsk_ca_state == TCP_CA_Open) { 92 if (icsk->icsk_ca_state == TCP_CA_Open) {
93 if (ca->maxRTT < ca->minRTT) 93 if (ca->maxRTT < ca->minRTT)
94 ca->maxRTT = ca->minRTT; 94 ca->maxRTT = ca->minRTT;
95 if (ca->maxRTT < srtt 95 if (ca->maxRTT < srtt &&
96 && srtt <= ca->maxRTT + msecs_to_jiffies(20)) 96 srtt <= ca->maxRTT + msecs_to_jiffies(20))
97 ca->maxRTT = srtt; 97 ca->maxRTT = srtt;
98 } 98 }
99} 99}
@@ -123,9 +123,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt
123 123
124 ca->packetcount += pkts_acked; 124 ca->packetcount += pkts_acked;
125 125
126 if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) 126 if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
127 && now - ca->lasttime >= ca->minRTT 127 now - ca->lasttime >= ca->minRTT &&
128 && ca->minRTT > 0) { 128 ca->minRTT > 0) {
129 __u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime); 129 __u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
130 130
131 if (htcp_ccount(ca) <= 3) { 131 if (htcp_ccount(ca) <= 3) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ba0eab65fe80..57ae96a04220 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
140 * "len" is invariant segment length, including TCP header. 140 * "len" is invariant segment length, including TCP header.
141 */ 141 */
142 len += skb->data - skb_transport_header(skb); 142 len += skb->data - skb_transport_header(skb);
143 if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) || 143 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
144 /* If PSH is not set, packet should be 144 /* If PSH is not set, packet should be
145 * full sized, provided peer TCP is not badly broken. 145 * full sized, provided peer TCP is not badly broken.
146 * This observation (if it is correct 8)) allows 146 * This observation (if it is correct 8)) allows
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
411 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 411 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
412 412
413 hint = min(hint, tp->rcv_wnd / 2); 413 hint = min(hint, tp->rcv_wnd / 2);
414 hint = min(hint, TCP_MIN_RCVMSS); 414 hint = min(hint, TCP_MSS_DEFAULT);
415 hint = max(hint, TCP_MIN_MSS); 415 hint = max(hint, TCP_MIN_MSS);
416 416
417 inet_csk(sk)->icsk_ack.rcv_mss = hint; 417 inet_csk(sk)->icsk_ack.rcv_mss = hint;
@@ -3698,14 +3698,12 @@ old_ack:
3698 * the fast version below fails. 3698 * the fast version below fails.
3699 */ 3699 */
3700void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3700void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3701 int estab, struct dst_entry *dst) 3701 u8 **hvpp, int estab, struct dst_entry *dst)
3702{ 3702{
3703 unsigned char *ptr; 3703 unsigned char *ptr;
3704 struct tcphdr *th = tcp_hdr(skb); 3704 struct tcphdr *th = tcp_hdr(skb);
3705 int length = (th->doff * 4) - sizeof(struct tcphdr); 3705 int length = (th->doff * 4) - sizeof(struct tcphdr);
3706 3706
3707 BUG_ON(!estab && !dst);
3708
3709 ptr = (unsigned char *)(th + 1); 3707 ptr = (unsigned char *)(th + 1);
3710 opt_rx->saw_tstamp = 0; 3708 opt_rx->saw_tstamp = 0;
3711 3709
@@ -3787,7 +3785,30 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3787 */ 3785 */
3788 break; 3786 break;
3789#endif 3787#endif
3790 } 3788 case TCPOPT_COOKIE:
3789 /* This option is variable length.
3790 */
3791 switch (opsize) {
3792 case TCPOLEN_COOKIE_BASE:
3793 /* not yet implemented */
3794 break;
3795 case TCPOLEN_COOKIE_PAIR:
3796 /* not yet implemented */
3797 break;
3798 case TCPOLEN_COOKIE_MIN+0:
3799 case TCPOLEN_COOKIE_MIN+2:
3800 case TCPOLEN_COOKIE_MIN+4:
3801 case TCPOLEN_COOKIE_MIN+6:
3802 case TCPOLEN_COOKIE_MAX:
3803 /* 16-bit multiple */
3804 opt_rx->cookie_plus = opsize;
3805 *hvpp = ptr;
3806 default:
3807 /* ignore option */
3808 break;
3809 };
3810 break;
3811 };
3791 3812
3792 ptr += opsize-2; 3813 ptr += opsize-2;
3793 length -= opsize; 3814 length -= opsize;
@@ -3815,17 +3836,20 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3815 * If it is wrong it falls back on tcp_parse_options(). 3836 * If it is wrong it falls back on tcp_parse_options().
3816 */ 3837 */
3817static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 3838static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3818 struct tcp_sock *tp) 3839 struct tcp_sock *tp, u8 **hvpp)
3819{ 3840{
3820 if (th->doff == sizeof(struct tcphdr) >> 2) { 3841 /* In the spirit of fast parsing, compare doff directly to constant
3842 * values. Because equality is used, short doff can be ignored here.
3843 */
3844 if (th->doff == (sizeof(*th) / 4)) {
3821 tp->rx_opt.saw_tstamp = 0; 3845 tp->rx_opt.saw_tstamp = 0;
3822 return 0; 3846 return 0;
3823 } else if (tp->rx_opt.tstamp_ok && 3847 } else if (tp->rx_opt.tstamp_ok &&
3824 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 3848 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
3825 if (tcp_parse_aligned_timestamp(tp, th)) 3849 if (tcp_parse_aligned_timestamp(tp, th))
3826 return 1; 3850 return 1;
3827 } 3851 }
3828 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3852 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
3829 return 1; 3853 return 1;
3830} 3854}
3831 3855
@@ -4854,11 +4878,11 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
4854 struct tcp_sock *tp = tcp_sk(sk); 4878 struct tcp_sock *tp = tcp_sk(sk);
4855 4879
4856 /* More than one full frame received... */ 4880 /* More than one full frame received... */
4857 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss 4881 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
4858 /* ... and right edge of window advances far enough. 4882 /* ... and right edge of window advances far enough.
4859 * (tcp_recvmsg() will send ACK otherwise). Or... 4883 * (tcp_recvmsg() will send ACK otherwise). Or...
4860 */ 4884 */
4861 && __tcp_select_window(sk) >= tp->rcv_wnd) || 4885 __tcp_select_window(sk) >= tp->rcv_wnd) ||
4862 /* We ACK each frame or... */ 4886 /* We ACK each frame or... */
4863 tcp_in_quickack_mode(sk) || 4887 tcp_in_quickack_mode(sk) ||
4864 /* We have out of order data. */ 4888 /* We have out of order data. */
@@ -5079,10 +5103,12 @@ out:
5079static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5103static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5080 struct tcphdr *th, int syn_inerr) 5104 struct tcphdr *th, int syn_inerr)
5081{ 5105{
5106 u8 *hash_location;
5082 struct tcp_sock *tp = tcp_sk(sk); 5107 struct tcp_sock *tp = tcp_sk(sk);
5083 5108
5084 /* RFC1323: H1. Apply PAWS check first. */ 5109 /* RFC1323: H1. Apply PAWS check first. */
5085 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5110 if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
5111 tp->rx_opt.saw_tstamp &&
5086 tcp_paws_discard(sk, skb)) { 5112 tcp_paws_discard(sk, skb)) {
5087 if (!th->rst) { 5113 if (!th->rst) {
5088 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5114 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5370,12 +5396,14 @@ discard:
5370static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5396static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5371 struct tcphdr *th, unsigned len) 5397 struct tcphdr *th, unsigned len)
5372{ 5398{
5373 struct tcp_sock *tp = tcp_sk(sk); 5399 u8 *hash_location;
5374 struct inet_connection_sock *icsk = inet_csk(sk); 5400 struct inet_connection_sock *icsk = inet_csk(sk);
5375 int saved_clamp = tp->rx_opt.mss_clamp; 5401 struct tcp_sock *tp = tcp_sk(sk);
5376 struct dst_entry *dst = __sk_dst_get(sk); 5402 struct dst_entry *dst = __sk_dst_get(sk);
5403 struct tcp_cookie_values *cvp = tp->cookie_values;
5404 int saved_clamp = tp->rx_opt.mss_clamp;
5377 5405
5378 tcp_parse_options(skb, &tp->rx_opt, 0, dst); 5406 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
5379 5407
5380 if (th->ack) { 5408 if (th->ack) {
5381 /* rfc793: 5409 /* rfc793:
@@ -5472,6 +5500,31 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5472 * Change state from SYN-SENT only after copied_seq 5500 * Change state from SYN-SENT only after copied_seq
5473 * is initialized. */ 5501 * is initialized. */
5474 tp->copied_seq = tp->rcv_nxt; 5502 tp->copied_seq = tp->rcv_nxt;
5503
5504 if (cvp != NULL &&
5505 cvp->cookie_pair_size > 0 &&
5506 tp->rx_opt.cookie_plus > 0) {
5507 int cookie_size = tp->rx_opt.cookie_plus
5508 - TCPOLEN_COOKIE_BASE;
5509 int cookie_pair_size = cookie_size
5510 + cvp->cookie_desired;
5511
5512 /* A cookie extension option was sent and returned.
5513 * Note that each incoming SYNACK replaces the
5514 * Responder cookie. The initial exchange is most
5515 * fragile, as protection against spoofing relies
5516 * entirely upon the sequence and timestamp (above).
5517 * This replacement strategy allows the correct pair to
5518 * pass through, while any others will be filtered via
5519 * Responder verification later.
5520 */
5521 if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
5522 memcpy(&cvp->cookie_pair[cvp->cookie_desired],
5523 hash_location, cookie_size);
5524 cvp->cookie_pair_size = cookie_pair_size;
5525 }
5526 }
5527
5475 smp_mb(); 5528 smp_mb();
5476 tcp_set_state(sk, TCP_ESTABLISHED); 5529 tcp_set_state(sk, TCP_ESTABLISHED);
5477 5530
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 657ae334f125..fee9aabd5aa1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
204 * when trying new connection. 204 * when trying new connection.
205 */ 205 */
206 if (peer != NULL && 206 if (peer != NULL &&
207 peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) { 207 (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
208 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; 208 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
209 tp->rx_opt.ts_recent = peer->tcp_ts; 209 tp->rx_opt.ts_recent = peer->tcp_ts;
210 } 210 }
@@ -217,7 +217,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
217 if (inet->opt) 217 if (inet->opt)
218 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 218 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
219 219
220 tp->rx_opt.mss_clamp = 536; 220 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
221 221
222 /* Socket identity is still unknown (sport may be zero). 222 /* Socket identity is still unknown (sport may be zero).
223 * However we set state to SYN-SENT and not releasing socket 223 * However we set state to SYN-SENT and not releasing socket
@@ -742,8 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 * This still operates on a request_sock only, not on a big 742 * This still operates on a request_sock only, not on a big
743 * socket. 743 * socket.
744 */ 744 */
745static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 745static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
746 struct dst_entry *dst) 746 struct request_sock *req,
747 struct request_values *rvp)
747{ 748{
748 const struct inet_request_sock *ireq = inet_rsk(req); 749 const struct inet_request_sock *ireq = inet_rsk(req);
749 int err = -1; 750 int err = -1;
@@ -753,7 +754,7 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
753 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 754 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
754 return -1; 755 return -1;
755 756
756 skb = tcp_make_synack(sk, dst, req); 757 skb = tcp_make_synack(sk, dst, req, rvp);
757 758
758 if (skb) { 759 if (skb) {
759 struct tcphdr *th = tcp_hdr(skb); 760 struct tcphdr *th = tcp_hdr(skb);
@@ -774,9 +775,10 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
774 return err; 775 return err;
775} 776}
776 777
777static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req) 778static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
779 struct request_values *rvp)
778{ 780{
779 return __tcp_v4_send_synack(sk, req, NULL); 781 return __tcp_v4_send_synack(sk, NULL, req, rvp);
780} 782}
781 783
782/* 784/*
@@ -1211,13 +1213,16 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
1211 1213
1212int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1214int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1213{ 1215{
1214 struct inet_request_sock *ireq; 1216 struct tcp_extend_values tmp_ext;
1215 struct tcp_options_received tmp_opt; 1217 struct tcp_options_received tmp_opt;
1218 u8 *hash_location;
1216 struct request_sock *req; 1219 struct request_sock *req;
1220 struct inet_request_sock *ireq;
1221 struct tcp_sock *tp = tcp_sk(sk);
1222 struct dst_entry *dst = NULL;
1217 __be32 saddr = ip_hdr(skb)->saddr; 1223 __be32 saddr = ip_hdr(skb)->saddr;
1218 __be32 daddr = ip_hdr(skb)->daddr; 1224 __be32 daddr = ip_hdr(skb)->daddr;
1219 __u32 isn = TCP_SKB_CB(skb)->when; 1225 __u32 isn = TCP_SKB_CB(skb)->when;
1220 struct dst_entry *dst = NULL;
1221#ifdef CONFIG_SYN_COOKIES 1226#ifdef CONFIG_SYN_COOKIES
1222 int want_cookie = 0; 1227 int want_cookie = 0;
1223#else 1228#else
@@ -1268,16 +1273,50 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1268 goto drop_and_free; 1273 goto drop_and_free;
1269 1274
1270 tcp_clear_options(&tmp_opt); 1275 tcp_clear_options(&tmp_opt);
1271 tmp_opt.mss_clamp = 536; 1276 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1272 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; 1277 tmp_opt.user_mss = tp->rx_opt.user_mss;
1278 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
1279
1280 if (tmp_opt.cookie_plus > 0 &&
1281 tmp_opt.saw_tstamp &&
1282 !tp->rx_opt.cookie_out_never &&
1283 (sysctl_tcp_cookie_size > 0 ||
1284 (tp->cookie_values != NULL &&
1285 tp->cookie_values->cookie_desired > 0))) {
1286 u8 *c;
1287 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1288 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1289
1290 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1291 goto drop_and_release;
1292
1293 /* Secret recipe starts with IP addresses */
1294 *mess++ ^= daddr;
1295 *mess++ ^= saddr;
1296
1297 /* plus variable length Initiator Cookie */
1298 c = (u8 *)mess;
1299 while (l-- > 0)
1300 *c++ ^= *hash_location++;
1273 1301
1274 tcp_parse_options(skb, &tmp_opt, 0, dst); 1302#ifdef CONFIG_SYN_COOKIES
1303 want_cookie = 0; /* not our kind of cookie */
1304#endif
1305 tmp_ext.cookie_out_never = 0; /* false */
1306 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1307 } else if (!tp->rx_opt.cookie_in_always) {
1308 /* redundant indications, but ensure initialization. */
1309 tmp_ext.cookie_out_never = 1; /* true */
1310 tmp_ext.cookie_plus = 0;
1311 } else {
1312 goto drop_and_release;
1313 }
1314 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1275 1315
1276 if (want_cookie && !tmp_opt.saw_tstamp) 1316 if (want_cookie && !tmp_opt.saw_tstamp)
1277 tcp_clear_options(&tmp_opt); 1317 tcp_clear_options(&tmp_opt);
1278 1318
1279 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1319 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1280
1281 tcp_openreq_init(req, &tmp_opt, skb); 1320 tcp_openreq_init(req, &tmp_opt, skb);
1282 1321
1283 if (security_inet_conn_request(sk, skb, req)) 1322 if (security_inet_conn_request(sk, skb, req))
@@ -1308,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1308 tcp_death_row.sysctl_tw_recycle && 1347 tcp_death_row.sysctl_tw_recycle &&
1309 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1348 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1310 peer->v4daddr == saddr) { 1349 peer->v4daddr == saddr) {
1311 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && 1350 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1312 (s32)(peer->tcp_ts - req->ts_recent) > 1351 (s32)(peer->tcp_ts - req->ts_recent) >
1313 TCP_PAWS_WINDOW) { 1352 TCP_PAWS_WINDOW) {
1314 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1353 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1337,7 +1376,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1337 } 1376 }
1338 tcp_rsk(req)->snt_isn = isn; 1377 tcp_rsk(req)->snt_isn = isn;
1339 1378
1340 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie) 1379 if (__tcp_v4_send_synack(sk, dst, req,
1380 (struct request_values *)&tmp_ext) ||
1381 want_cookie)
1341 goto drop_and_free; 1382 goto drop_and_free;
1342 1383
1343 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1384 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
@@ -1727,9 +1768,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
1727 1768
1728 if (peer) { 1769 if (peer) {
1729 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || 1770 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1730 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && 1771 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1731 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { 1772 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1732 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; 1773 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1733 peer->tcp_ts = tp->rx_opt.ts_recent; 1774 peer->tcp_ts = tp->rx_opt.ts_recent;
1734 } 1775 }
1735 if (release_it) 1776 if (release_it)
@@ -1748,9 +1789,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1748 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 1789 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1749 1790
1750 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || 1791 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1751 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && 1792 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1752 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { 1793 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1753 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; 1794 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1754 peer->tcp_ts = tcptw->tw_ts_recent; 1795 peer->tcp_ts = tcptw->tw_ts_recent;
1755 } 1796 }
1756 inet_putpeer(peer); 1797 inet_putpeer(peer);
@@ -1815,7 +1856,7 @@ static int tcp_v4_init_sock(struct sock *sk)
1815 */ 1856 */
1816 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 1857 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1817 tp->snd_cwnd_clamp = ~0; 1858 tp->snd_cwnd_clamp = ~0;
1818 tp->mss_cache = 536; 1859 tp->mss_cache = TCP_MSS_DEFAULT;
1819 1860
1820 tp->reordering = sysctl_tcp_reordering; 1861 tp->reordering = sysctl_tcp_reordering;
1821 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 1862 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
@@ -1831,6 +1872,19 @@ static int tcp_v4_init_sock(struct sock *sk)
1831 tp->af_specific = &tcp_sock_ipv4_specific; 1872 tp->af_specific = &tcp_sock_ipv4_specific;
1832#endif 1873#endif
1833 1874
1875 /* TCP Cookie Transactions */
1876 if (sysctl_tcp_cookie_size > 0) {
1877 /* Default, cookies without s_data_payload. */
1878 tp->cookie_values =
1879 kzalloc(sizeof(*tp->cookie_values),
1880 sk->sk_allocation);
1881 if (tp->cookie_values != NULL)
1882 kref_init(&tp->cookie_values->kref);
1883 }
1884 /* Presumed zeroed, in order of appearance:
1885 * cookie_in_always, cookie_out_never,
1886 * s_data_constant, s_data_in, s_data_out
1887 */
1834 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1888 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1835 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1889 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1836 1890
@@ -1884,6 +1938,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
1884 sk->sk_sndmsg_page = NULL; 1938 sk->sk_sndmsg_page = NULL;
1885 } 1939 }
1886 1940
1941 /* TCP Cookie Transactions */
1942 if (tp->cookie_values != NULL) {
1943 kref_put(&tp->cookie_values->kref,
1944 tcp_cookie_values_release);
1945 tp->cookie_values = NULL;
1946 }
1947
1887 percpu_counter_dec(&tcp_sockets_allocated); 1948 percpu_counter_dec(&tcp_sockets_allocated);
1888} 1949}
1889 1950
@@ -2468,12 +2529,17 @@ static int __net_init tcp_sk_init(struct net *net)
2468static void __net_exit tcp_sk_exit(struct net *net) 2529static void __net_exit tcp_sk_exit(struct net *net)
2469{ 2530{
2470 inet_ctl_sock_destroy(net->ipv4.tcp_sock); 2531 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2471 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET); 2532}
2533
2534static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2535{
2536 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2472} 2537}
2473 2538
2474static struct pernet_operations __net_initdata tcp_sk_ops = { 2539static struct pernet_operations __net_initdata tcp_sk_ops = {
2475 .init = tcp_sk_init, 2540 .init = tcp_sk_init,
2476 .exit = tcp_sk_exit, 2541 .exit = tcp_sk_exit,
2542 .exit_batch = tcp_sk_exit_batch,
2477}; 2543};
2478 2544
2479void __init tcp_v4_init(void) 2545void __init tcp_v4_init(void)
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index ce3c41ff50b2..de870377fbba 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -143,8 +143,8 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
143 goto out; 143 goto out;
144 144
145 /* we can't calc remote HZ with no different!! */ 145 /* we can't calc remote HZ with no different!! */
146 if (tp->rx_opt.rcv_tsval == lp->remote_ref_time 146 if (tp->rx_opt.rcv_tsval == lp->remote_ref_time ||
147 || tp->rx_opt.rcv_tsecr == lp->local_ref_time) 147 tp->rx_opt.rcv_tsecr == lp->local_ref_time)
148 goto out; 148 goto out;
149 149
150 m = HZ * (tp->rx_opt.rcv_tsval - 150 m = HZ * (tp->rx_opt.rcv_tsval -
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 463d51b53d37..87accec8d097 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -26,13 +26,7 @@
26#include <net/inet_common.h> 26#include <net/inet_common.h>
27#include <net/xfrm.h> 27#include <net/xfrm.h>
28 28
29#ifdef CONFIG_SYSCTL 29int sysctl_tcp_syncookies __read_mostly = 1;
30#define SYNC_INIT 0 /* let the user enable it */
31#else
32#define SYNC_INIT 1
33#endif
34
35int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
36EXPORT_SYMBOL(sysctl_tcp_syncookies); 30EXPORT_SYMBOL(sysctl_tcp_syncookies);
37 31
38int sysctl_tcp_abort_on_overflow __read_mostly; 32int sysctl_tcp_abort_on_overflow __read_mostly;
@@ -96,13 +90,14 @@ enum tcp_tw_status
96tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, 90tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
97 const struct tcphdr *th) 91 const struct tcphdr *th)
98{ 92{
99 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
100 struct tcp_options_received tmp_opt; 93 struct tcp_options_received tmp_opt;
94 u8 *hash_location;
95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
101 int paws_reject = 0; 96 int paws_reject = 0;
102 97
103 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 98 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
104 tmp_opt.tstamp_ok = 1; 99 tmp_opt.tstamp_ok = 1;
105 tcp_parse_options(skb, &tmp_opt, 1, NULL); 100 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
106 101
107 if (tmp_opt.saw_tstamp) { 102 if (tmp_opt.saw_tstamp) {
108 tmp_opt.ts_recent = tcptw->tw_ts_recent; 103 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -389,14 +384,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
389 const struct inet_request_sock *ireq = inet_rsk(req); 384 const struct inet_request_sock *ireq = inet_rsk(req);
390 struct tcp_request_sock *treq = tcp_rsk(req); 385 struct tcp_request_sock *treq = tcp_rsk(req);
391 struct inet_connection_sock *newicsk = inet_csk(newsk); 386 struct inet_connection_sock *newicsk = inet_csk(newsk);
392 struct tcp_sock *newtp; 387 struct tcp_sock *newtp = tcp_sk(newsk);
388 struct tcp_sock *oldtp = tcp_sk(sk);
389 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
390
391 /* TCP Cookie Transactions require space for the cookie pair,
392 * as it differs for each connection. There is no need to
393 * copy any s_data_payload stored at the original socket.
394 * Failure will prevent resuming the connection.
395 *
396 * Presumed copied, in order of appearance:
397 * cookie_in_always, cookie_out_never
398 */
399 if (oldcvp != NULL) {
400 struct tcp_cookie_values *newcvp =
401 kzalloc(sizeof(*newtp->cookie_values),
402 GFP_ATOMIC);
403
404 if (newcvp != NULL) {
405 kref_init(&newcvp->kref);
406 newcvp->cookie_desired =
407 oldcvp->cookie_desired;
408 newtp->cookie_values = newcvp;
409 } else {
410 /* Not Yet Implemented */
411 newtp->cookie_values = NULL;
412 }
413 }
393 414
394 /* Now setup tcp_sock */ 415 /* Now setup tcp_sock */
395 newtp = tcp_sk(newsk);
396 newtp->pred_flags = 0; 416 newtp->pred_flags = 0;
397 newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; 417
398 newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1; 418 newtp->rcv_wup = newtp->copied_seq =
399 newtp->snd_up = treq->snt_isn + 1; 419 newtp->rcv_nxt = treq->rcv_isn + 1;
420
421 newtp->snd_sml = newtp->snd_una =
422 newtp->snd_nxt = newtp->snd_up =
423 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
400 424
401 tcp_prequeue_init(newtp); 425 tcp_prequeue_init(newtp);
402 426
@@ -429,8 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
429 tcp_set_ca_state(newsk, TCP_CA_Open); 453 tcp_set_ca_state(newsk, TCP_CA_Open);
430 tcp_init_xmit_timers(newsk); 454 tcp_init_xmit_timers(newsk);
431 skb_queue_head_init(&newtp->out_of_order_queue); 455 skb_queue_head_init(&newtp->out_of_order_queue);
432 newtp->write_seq = treq->snt_isn + 1; 456 newtp->write_seq = newtp->pushed_seq =
433 newtp->pushed_seq = newtp->write_seq; 457 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
434 458
435 newtp->rx_opt.saw_tstamp = 0; 459 newtp->rx_opt.saw_tstamp = 0;
436 460
@@ -476,7 +500,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
476 if (newtp->af_specific->md5_lookup(sk, newsk)) 500 if (newtp->af_specific->md5_lookup(sk, newsk))
477 newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 501 newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
478#endif 502#endif
479 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) 503 if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
480 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 504 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
481 newtp->rx_opt.mss_clamp = req->mss; 505 newtp->rx_opt.mss_clamp = req->mss;
482 TCP_ECN_openreq_child(newtp, req); 506 TCP_ECN_openreq_child(newtp, req);
@@ -495,16 +519,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
495 struct request_sock *req, 519 struct request_sock *req,
496 struct request_sock **prev) 520 struct request_sock **prev)
497{ 521{
522 struct tcp_options_received tmp_opt;
523 u8 *hash_location;
524 struct sock *child;
498 const struct tcphdr *th = tcp_hdr(skb); 525 const struct tcphdr *th = tcp_hdr(skb);
499 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
500 int paws_reject = 0; 527 int paws_reject = 0;
501 struct tcp_options_received tmp_opt;
502 struct sock *child;
503 struct dst_entry *dst = inet_csk_route_req(sk, req);
504 528
505 tmp_opt.saw_tstamp = 0; 529 if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
506 if (th->doff > (sizeof(struct tcphdr)>>2)) { 530 tmp_opt.tstamp_ok = 1;
507 tcp_parse_options(skb, &tmp_opt, 0, dst); 531 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
508 532
509 if (tmp_opt.saw_tstamp) { 533 if (tmp_opt.saw_tstamp) {
510 tmp_opt.ts_recent = req->ts_recent; 534 tmp_opt.ts_recent = req->ts_recent;
@@ -517,8 +541,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
517 } 541 }
518 } 542 }
519 543
520 dst_release(dst);
521
522 /* Check for pure retransmitted SYN. */ 544 /* Check for pure retransmitted SYN. */
523 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn && 545 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
524 flg == TCP_FLAG_SYN && 546 flg == TCP_FLAG_SYN &&
@@ -540,7 +562,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
540 * Enforce "SYN-ACK" according to figure 8, figure 6 562 * Enforce "SYN-ACK" according to figure 8, figure 6
541 * of RFC793, fixed by RFC1122. 563 * of RFC793, fixed by RFC1122.
542 */ 564 */
543 req->rsk_ops->rtx_syn_ack(sk, req); 565 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
544 return NULL; 566 return NULL;
545 } 567 }
546 568
@@ -599,7 +621,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
599 * Invalid ACK: reset will be sent by listening socket 621 * Invalid ACK: reset will be sent by listening socket
600 */ 622 */
601 if ((flg & TCP_FLAG_ACK) && 623 if ((flg & TCP_FLAG_ACK) &&
602 (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1)) 624 (TCP_SKB_CB(skb)->ack_seq !=
625 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
603 return sk; 626 return sk;
604 627
605 /* Also, it would be not so bad idea to check rcv_tsecr, which 628 /* Also, it would be not so bad idea to check rcv_tsecr, which
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 616c686ca253..93316a96d820 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -59,6 +59,10 @@ int sysctl_tcp_base_mss __read_mostly = 512;
59/* By default, RFC2861 behavior. */ 59/* By default, RFC2861 behavior. */
60int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 60int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
61 61
62int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
63EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
64
65
62/* Account for new data that has been sent to the network. */ 66/* Account for new data that has been sent to the network. */
63static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 67static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
64{ 68{
@@ -362,15 +366,45 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
362#define OPTION_TS (1 << 1) 366#define OPTION_TS (1 << 1)
363#define OPTION_MD5 (1 << 2) 367#define OPTION_MD5 (1 << 2)
364#define OPTION_WSCALE (1 << 3) 368#define OPTION_WSCALE (1 << 3)
369#define OPTION_COOKIE_EXTENSION (1 << 4)
365 370
366struct tcp_out_options { 371struct tcp_out_options {
367 u8 options; /* bit field of OPTION_* */ 372 u8 options; /* bit field of OPTION_* */
368 u8 ws; /* window scale, 0 to disable */ 373 u8 ws; /* window scale, 0 to disable */
369 u8 num_sack_blocks; /* number of SACK blocks to include */ 374 u8 num_sack_blocks; /* number of SACK blocks to include */
375 u8 hash_size; /* bytes in hash_location */
370 u16 mss; /* 0 to disable */ 376 u16 mss; /* 0 to disable */
371 __u32 tsval, tsecr; /* need to include OPTION_TS */ 377 __u32 tsval, tsecr; /* need to include OPTION_TS */
378 __u8 *hash_location; /* temporary pointer, overloaded */
372}; 379};
373 380
381/* The sysctl int routines are generic, so check consistency here.
382 */
383static u8 tcp_cookie_size_check(u8 desired)
384{
385 if (desired > 0) {
386 /* previously specified */
387 return desired;
388 }
389 if (sysctl_tcp_cookie_size <= 0) {
390 /* no default specified */
391 return 0;
392 }
393 if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
394 /* value too small, specify minimum */
395 return TCP_COOKIE_MIN;
396 }
397 if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
398 /* value too large, specify maximum */
399 return TCP_COOKIE_MAX;
400 }
401 if (0x1 & sysctl_tcp_cookie_size) {
402 /* 8-bit multiple, illegal, fix it */
403 return (u8)(sysctl_tcp_cookie_size + 0x1);
404 }
405 return (u8)sysctl_tcp_cookie_size;
406}
407
374/* Write previously computed TCP options to the packet. 408/* Write previously computed TCP options to the packet.
375 * 409 *
376 * Beware: Something in the Internet is very sensitive to the ordering of 410 * Beware: Something in the Internet is very sensitive to the ordering of
@@ -385,17 +419,34 @@ struct tcp_out_options {
385 * (but it may well be that other scenarios fail similarly). 419 * (but it may well be that other scenarios fail similarly).
386 */ 420 */
387static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 421static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
388 const struct tcp_out_options *opts, 422 struct tcp_out_options *opts)
389 __u8 **md5_hash) { 423{
390 if (unlikely(OPTION_MD5 & opts->options)) { 424 u8 options = opts->options; /* mungable copy */
391 *ptr++ = htonl((TCPOPT_NOP << 24) | 425
392 (TCPOPT_NOP << 16) | 426 /* Having both authentication and cookies for security is redundant,
393 (TCPOPT_MD5SIG << 8) | 427 * and there's certainly not enough room. Instead, the cookie-less
394 TCPOLEN_MD5SIG); 428 * extension variant is proposed.
395 *md5_hash = (__u8 *)ptr; 429 *
430 * Consider the pessimal case with authentication. The options
431 * could look like:
432 * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
433 */
434 if (unlikely(OPTION_MD5 & options)) {
435 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
436 *ptr++ = htonl((TCPOPT_COOKIE << 24) |
437 (TCPOLEN_COOKIE_BASE << 16) |
438 (TCPOPT_MD5SIG << 8) |
439 TCPOLEN_MD5SIG);
440 } else {
441 *ptr++ = htonl((TCPOPT_NOP << 24) |
442 (TCPOPT_NOP << 16) |
443 (TCPOPT_MD5SIG << 8) |
444 TCPOLEN_MD5SIG);
445 }
446 options &= ~OPTION_COOKIE_EXTENSION;
447 /* overload cookie hash location */
448 opts->hash_location = (__u8 *)ptr;
396 ptr += 4; 449 ptr += 4;
397 } else {
398 *md5_hash = NULL;
399 } 450 }
400 451
401 if (unlikely(opts->mss)) { 452 if (unlikely(opts->mss)) {
@@ -404,12 +455,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
404 opts->mss); 455 opts->mss);
405 } 456 }
406 457
407 if (likely(OPTION_TS & opts->options)) { 458 if (likely(OPTION_TS & options)) {
408 if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { 459 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
409 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 460 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
410 (TCPOLEN_SACK_PERM << 16) | 461 (TCPOLEN_SACK_PERM << 16) |
411 (TCPOPT_TIMESTAMP << 8) | 462 (TCPOPT_TIMESTAMP << 8) |
412 TCPOLEN_TIMESTAMP); 463 TCPOLEN_TIMESTAMP);
464 options &= ~OPTION_SACK_ADVERTISE;
413 } else { 465 } else {
414 *ptr++ = htonl((TCPOPT_NOP << 24) | 466 *ptr++ = htonl((TCPOPT_NOP << 24) |
415 (TCPOPT_NOP << 16) | 467 (TCPOPT_NOP << 16) |
@@ -420,15 +472,52 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
420 *ptr++ = htonl(opts->tsecr); 472 *ptr++ = htonl(opts->tsecr);
421 } 473 }
422 474
423 if (unlikely(OPTION_SACK_ADVERTISE & opts->options && 475 /* Specification requires after timestamp, so do it now.
424 !(OPTION_TS & opts->options))) { 476 *
477 * Consider the pessimal case without authentication. The options
478 * could look like:
479 * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
480 */
481 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
482 __u8 *cookie_copy = opts->hash_location;
483 u8 cookie_size = opts->hash_size;
484
485 /* 8-bit multiple handled in tcp_cookie_size_check() above,
486 * and elsewhere.
487 */
488 if (0x2 & cookie_size) {
489 __u8 *p = (__u8 *)ptr;
490
491 /* 16-bit multiple */
492 *p++ = TCPOPT_COOKIE;
493 *p++ = TCPOLEN_COOKIE_BASE + cookie_size;
494 *p++ = *cookie_copy++;
495 *p++ = *cookie_copy++;
496 ptr++;
497 cookie_size -= 2;
498 } else {
499 /* 32-bit multiple */
500 *ptr++ = htonl(((TCPOPT_NOP << 24) |
501 (TCPOPT_NOP << 16) |
502 (TCPOPT_COOKIE << 8) |
503 TCPOLEN_COOKIE_BASE) +
504 cookie_size);
505 }
506
507 if (cookie_size > 0) {
508 memcpy(ptr, cookie_copy, cookie_size);
509 ptr += (cookie_size / 4);
510 }
511 }
512
513 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
425 *ptr++ = htonl((TCPOPT_NOP << 24) | 514 *ptr++ = htonl((TCPOPT_NOP << 24) |
426 (TCPOPT_NOP << 16) | 515 (TCPOPT_NOP << 16) |
427 (TCPOPT_SACK_PERM << 8) | 516 (TCPOPT_SACK_PERM << 8) |
428 TCPOLEN_SACK_PERM); 517 TCPOLEN_SACK_PERM);
429 } 518 }
430 519
431 if (unlikely(OPTION_WSCALE & opts->options)) { 520 if (unlikely(OPTION_WSCALE & options)) {
432 *ptr++ = htonl((TCPOPT_NOP << 24) | 521 *ptr++ = htonl((TCPOPT_NOP << 24) |
433 (TCPOPT_WINDOW << 16) | 522 (TCPOPT_WINDOW << 16) |
434 (TCPOLEN_WINDOW << 8) | 523 (TCPOLEN_WINDOW << 8) |
@@ -463,14 +552,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
463 struct tcp_out_options *opts, 552 struct tcp_out_options *opts,
464 struct tcp_md5sig_key **md5) { 553 struct tcp_md5sig_key **md5) {
465 struct tcp_sock *tp = tcp_sk(sk); 554 struct tcp_sock *tp = tcp_sk(sk);
466 unsigned size = 0; 555 struct tcp_cookie_values *cvp = tp->cookie_values;
467 struct dst_entry *dst = __sk_dst_get(sk); 556 struct dst_entry *dst = __sk_dst_get(sk);
557 unsigned remaining = MAX_TCP_OPTION_SPACE;
558 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
559 tcp_cookie_size_check(cvp->cookie_desired) :
560 0;
468 561
469#ifdef CONFIG_TCP_MD5SIG 562#ifdef CONFIG_TCP_MD5SIG
470 *md5 = tp->af_specific->md5_lookup(sk, sk); 563 *md5 = tp->af_specific->md5_lookup(sk, sk);
471 if (*md5) { 564 if (*md5) {
472 opts->options |= OPTION_MD5; 565 opts->options |= OPTION_MD5;
473 size += TCPOLEN_MD5SIG_ALIGNED; 566 remaining -= TCPOLEN_MD5SIG_ALIGNED;
474 } 567 }
475#else 568#else
476 *md5 = NULL; 569 *md5 = NULL;
@@ -486,7 +579,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
486 * SACKs don't matter, we never delay an ACK when we have any of those 579 * SACKs don't matter, we never delay an ACK when we have any of those
487 * going out. */ 580 * going out. */
488 opts->mss = tcp_advertise_mss(sk); 581 opts->mss = tcp_advertise_mss(sk);
489 size += TCPOLEN_MSS_ALIGNED; 582 remaining -= TCPOLEN_MSS_ALIGNED;
490 583
491 if (likely(sysctl_tcp_timestamps && 584 if (likely(sysctl_tcp_timestamps &&
492 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && 585 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
@@ -494,22 +587,68 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
494 opts->options |= OPTION_TS; 587 opts->options |= OPTION_TS;
495 opts->tsval = TCP_SKB_CB(skb)->when; 588 opts->tsval = TCP_SKB_CB(skb)->when;
496 opts->tsecr = tp->rx_opt.ts_recent; 589 opts->tsecr = tp->rx_opt.ts_recent;
497 size += TCPOLEN_TSTAMP_ALIGNED; 590 remaining -= TCPOLEN_TSTAMP_ALIGNED;
498 } 591 }
499 if (likely(sysctl_tcp_window_scaling && 592 if (likely(sysctl_tcp_window_scaling &&
500 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { 593 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
501 opts->ws = tp->rx_opt.rcv_wscale; 594 opts->ws = tp->rx_opt.rcv_wscale;
502 opts->options |= OPTION_WSCALE; 595 opts->options |= OPTION_WSCALE;
503 size += TCPOLEN_WSCALE_ALIGNED; 596 remaining -= TCPOLEN_WSCALE_ALIGNED;
504 } 597 }
505 if (likely(sysctl_tcp_sack && 598 if (likely(sysctl_tcp_sack &&
506 !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { 599 !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
507 opts->options |= OPTION_SACK_ADVERTISE; 600 opts->options |= OPTION_SACK_ADVERTISE;
508 if (unlikely(!(OPTION_TS & opts->options))) 601 if (unlikely(!(OPTION_TS & opts->options)))
509 size += TCPOLEN_SACKPERM_ALIGNED; 602 remaining -= TCPOLEN_SACKPERM_ALIGNED;
510 } 603 }
511 604
512 return size; 605 /* Note that timestamps are required by the specification.
606 *
607 * Odd numbers of bytes are prohibited by the specification, ensuring
608 * that the cookie is 16-bit aligned, and the resulting cookie pair is
609 * 32-bit aligned.
610 */
611 if (*md5 == NULL &&
612 (OPTION_TS & opts->options) &&
613 cookie_size > 0) {
614 int need = TCPOLEN_COOKIE_BASE + cookie_size;
615
616 if (0x2 & need) {
617 /* 32-bit multiple */
618 need += 2; /* NOPs */
619
620 if (need > remaining) {
621 /* try shrinking cookie to fit */
622 cookie_size -= 2;
623 need -= 4;
624 }
625 }
626 while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
627 cookie_size -= 4;
628 need -= 4;
629 }
630 if (TCP_COOKIE_MIN <= cookie_size) {
631 opts->options |= OPTION_COOKIE_EXTENSION;
632 opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
633 opts->hash_size = cookie_size;
634
635 /* Remember for future incarnations. */
636 cvp->cookie_desired = cookie_size;
637
638 if (cvp->cookie_desired != cvp->cookie_pair_size) {
639 /* Currently use random bytes as a nonce,
640 * assuming these are completely unpredictable
641 * by hostile users of the same system.
642 */
643 get_random_bytes(&cvp->cookie_pair[0],
644 cookie_size);
645 cvp->cookie_pair_size = cookie_size;
646 }
647
648 remaining -= need;
649 }
650 }
651 return MAX_TCP_OPTION_SPACE - remaining;
513} 652}
514 653
515/* Set up TCP options for SYN-ACKs. */ 654/* Set up TCP options for SYN-ACKs. */
@@ -517,48 +656,77 @@ static unsigned tcp_synack_options(struct sock *sk,
517 struct request_sock *req, 656 struct request_sock *req,
518 unsigned mss, struct sk_buff *skb, 657 unsigned mss, struct sk_buff *skb,
519 struct tcp_out_options *opts, 658 struct tcp_out_options *opts,
520 struct tcp_md5sig_key **md5) { 659 struct tcp_md5sig_key **md5,
521 unsigned size = 0; 660 struct tcp_extend_values *xvp)
661{
522 struct inet_request_sock *ireq = inet_rsk(req); 662 struct inet_request_sock *ireq = inet_rsk(req);
523 char doing_ts; 663 unsigned remaining = MAX_TCP_OPTION_SPACE;
664 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
665 xvp->cookie_plus :
666 0;
667 bool doing_ts = ireq->tstamp_ok;
524 668
525#ifdef CONFIG_TCP_MD5SIG 669#ifdef CONFIG_TCP_MD5SIG
526 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 670 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
527 if (*md5) { 671 if (*md5) {
528 opts->options |= OPTION_MD5; 672 opts->options |= OPTION_MD5;
529 size += TCPOLEN_MD5SIG_ALIGNED; 673 remaining -= TCPOLEN_MD5SIG_ALIGNED;
674
675 /* We can't fit any SACK blocks in a packet with MD5 + TS
676 * options. There was discussion about disabling SACK
677 * rather than TS in order to fit in better with old,
678 * buggy kernels, but that was deemed to be unnecessary.
679 */
680 doing_ts &= !ireq->sack_ok;
530 } 681 }
531#else 682#else
532 *md5 = NULL; 683 *md5 = NULL;
533#endif 684#endif
534 685
535 /* we can't fit any SACK blocks in a packet with MD5 + TS 686 /* We always send an MSS option. */
536 options. There was discussion about disabling SACK rather than TS in
537 order to fit in better with old, buggy kernels, but that was deemed
538 to be unnecessary. */
539 doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok);
540
541 opts->mss = mss; 687 opts->mss = mss;
542 size += TCPOLEN_MSS_ALIGNED; 688 remaining -= TCPOLEN_MSS_ALIGNED;
543 689
544 if (likely(ireq->wscale_ok)) { 690 if (likely(ireq->wscale_ok)) {
545 opts->ws = ireq->rcv_wscale; 691 opts->ws = ireq->rcv_wscale;
546 opts->options |= OPTION_WSCALE; 692 opts->options |= OPTION_WSCALE;
547 size += TCPOLEN_WSCALE_ALIGNED; 693 remaining -= TCPOLEN_WSCALE_ALIGNED;
548 } 694 }
549 if (likely(doing_ts)) { 695 if (likely(doing_ts)) {
550 opts->options |= OPTION_TS; 696 opts->options |= OPTION_TS;
551 opts->tsval = TCP_SKB_CB(skb)->when; 697 opts->tsval = TCP_SKB_CB(skb)->when;
552 opts->tsecr = req->ts_recent; 698 opts->tsecr = req->ts_recent;
553 size += TCPOLEN_TSTAMP_ALIGNED; 699 remaining -= TCPOLEN_TSTAMP_ALIGNED;
554 } 700 }
555 if (likely(ireq->sack_ok)) { 701 if (likely(ireq->sack_ok)) {
556 opts->options |= OPTION_SACK_ADVERTISE; 702 opts->options |= OPTION_SACK_ADVERTISE;
557 if (unlikely(!doing_ts)) 703 if (unlikely(!doing_ts))
558 size += TCPOLEN_SACKPERM_ALIGNED; 704 remaining -= TCPOLEN_SACKPERM_ALIGNED;
559 } 705 }
560 706
561 return size; 707 /* Similar rationale to tcp_syn_options() applies here, too.
708 * If the <SYN> options fit, the same options should fit now!
709 */
710 if (*md5 == NULL &&
711 doing_ts &&
712 cookie_plus > TCPOLEN_COOKIE_BASE) {
713 int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
714
715 if (0x2 & need) {
716 /* 32-bit multiple */
717 need += 2; /* NOPs */
718 }
719 if (need <= remaining) {
720 opts->options |= OPTION_COOKIE_EXTENSION;
721 opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
722 remaining -= need;
723 } else {
724 /* There's no error return, so flag it. */
725 xvp->cookie_out_never = 1; /* true */
726 opts->hash_size = 0;
727 }
728 }
729 return MAX_TCP_OPTION_SPACE - remaining;
562} 730}
563 731
564/* Compute TCP options for ESTABLISHED sockets. This is not the 732/* Compute TCP options for ESTABLISHED sockets. This is not the
@@ -624,7 +792,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
624 struct tcp_out_options opts; 792 struct tcp_out_options opts;
625 unsigned tcp_options_size, tcp_header_size; 793 unsigned tcp_options_size, tcp_header_size;
626 struct tcp_md5sig_key *md5; 794 struct tcp_md5sig_key *md5;
627 __u8 *md5_hash_location;
628 struct tcphdr *th; 795 struct tcphdr *th;
629 int err; 796 int err;
630 797
@@ -695,7 +862,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
695 } 862 }
696 } 863 }
697 864
698 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 865 tcp_options_write((__be32 *)(th + 1), tp, &opts);
699 if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) 866 if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
700 TCP_ECN_send(sk, skb, tcp_header_size); 867 TCP_ECN_send(sk, skb, tcp_header_size);
701 868
@@ -703,7 +870,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
703 /* Calculate the MD5 hash, as we have all we need now */ 870 /* Calculate the MD5 hash, as we have all we need now */
704 if (md5) { 871 if (md5) {
705 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 872 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
706 tp->af_specific->calc_md5_hash(md5_hash_location, 873 tp->af_specific->calc_md5_hash(opts.hash_location,
707 md5, sk, NULL, skb); 874 md5, sk, NULL, skb);
708 } 875 }
709#endif 876#endif
@@ -1923,8 +2090,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1923 * case, when window is shrunk to zero. In this case 2090 * case, when window is shrunk to zero. In this case
1924 * our retransmit serves as a zero window probe. 2091 * our retransmit serves as a zero window probe.
1925 */ 2092 */
1926 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) 2093 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
1927 && TCP_SKB_CB(skb)->seq != tp->snd_una) 2094 TCP_SKB_CB(skb)->seq != tp->snd_una)
1928 return -EAGAIN; 2095 return -EAGAIN;
1929 2096
1930 if (skb->len > cur_mss) { 2097 if (skb->len > cur_mss) {
@@ -2224,16 +2391,17 @@ int tcp_send_synack(struct sock *sk)
2224 2391
2225/* Prepare a SYN-ACK. */ 2392/* Prepare a SYN-ACK. */
2226struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2393struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2227 struct request_sock *req) 2394 struct request_sock *req,
2395 struct request_values *rvp)
2228{ 2396{
2397 struct tcp_out_options opts;
2398 struct tcp_extend_values *xvp = tcp_xv(rvp);
2229 struct inet_request_sock *ireq = inet_rsk(req); 2399 struct inet_request_sock *ireq = inet_rsk(req);
2230 struct tcp_sock *tp = tcp_sk(sk); 2400 struct tcp_sock *tp = tcp_sk(sk);
2231 struct tcphdr *th; 2401 struct tcphdr *th;
2232 int tcp_header_size;
2233 struct tcp_out_options opts;
2234 struct sk_buff *skb; 2402 struct sk_buff *skb;
2235 struct tcp_md5sig_key *md5; 2403 struct tcp_md5sig_key *md5;
2236 __u8 *md5_hash_location; 2404 int tcp_header_size;
2237 int mss; 2405 int mss;
2238 2406
2239 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2407 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
@@ -2271,8 +2439,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2271#endif 2439#endif
2272 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2440 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2273 tcp_header_size = tcp_synack_options(sk, req, mss, 2441 tcp_header_size = tcp_synack_options(sk, req, mss,
2274 skb, &opts, &md5) + 2442 skb, &opts, &md5, xvp)
2275 sizeof(struct tcphdr); 2443 + sizeof(*th);
2276 2444
2277 skb_push(skb, tcp_header_size); 2445 skb_push(skb, tcp_header_size);
2278 skb_reset_transport_header(skb); 2446 skb_reset_transport_header(skb);
@@ -2289,19 +2457,58 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2289 */ 2457 */
2290 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2458 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2291 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2459 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
2460
2461 if (OPTION_COOKIE_EXTENSION & opts.options) {
2462 const struct tcp_cookie_values *cvp = tp->cookie_values;
2463
2464 if (cvp != NULL &&
2465 cvp->s_data_constant &&
2466 cvp->s_data_desired > 0) {
2467 u8 *buf = skb_put(skb, cvp->s_data_desired);
2468
2469 /* copy data directly from the listening socket. */
2470 memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
2471 TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
2472 }
2473
2474 if (opts.hash_size > 0) {
2475 __u32 workspace[SHA_WORKSPACE_WORDS];
2476 u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
2477 u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
2478
2479 /* Secret recipe depends on the Timestamp, (future)
2480 * Sequence and Acknowledgment Numbers, Initiator
2481 * Cookie, and others handled by IP variant caller.
2482 */
2483 *tail-- ^= opts.tsval;
2484 *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
2485 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2486
2487 /* recommended */
2488 *tail-- ^= ((th->dest << 16) | th->source);
2489 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2490
2491 sha_transform((__u32 *)&xvp->cookie_bakery[0],
2492 (char *)mess,
2493 &workspace[0]);
2494 opts.hash_location =
2495 (__u8 *)&xvp->cookie_bakery[0];
2496 }
2497 }
2498
2292 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2499 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2293 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2500 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2294 2501
2295 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2502 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2296 th->window = htons(min(req->rcv_wnd, 65535U)); 2503 th->window = htons(min(req->rcv_wnd, 65535U));
2297 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 2504 tcp_options_write((__be32 *)(th + 1), tp, &opts);
2298 th->doff = (tcp_header_size >> 2); 2505 th->doff = (tcp_header_size >> 2);
2299 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 2506 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
2300 2507
2301#ifdef CONFIG_TCP_MD5SIG 2508#ifdef CONFIG_TCP_MD5SIG
2302 /* Okay, we have all we need - do the md5 hash if needed */ 2509 /* Okay, we have all we need - do the md5 hash if needed */
2303 if (md5) { 2510 if (md5) {
2304 tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location, 2511 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2305 md5, NULL, req, skb); 2512 md5, NULL, req, skb);
2306 } 2513 }
2307#endif 2514#endif
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 7a3cc2ffad84..bb110c5ce1d2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -95,8 +95,8 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
95 95
96 /* Only update if port matches */ 96 /* Only update if port matches */
97 if ((port == 0 || ntohs(inet->inet_dport) == port || 97 if ((port == 0 || ntohs(inet->inet_dport) == port ||
98 ntohs(inet->inet_sport) == port) 98 ntohs(inet->inet_sport) == port) &&
99 && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { 99 (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
100 100
101 spin_lock(&tcp_probe.lock); 101 spin_lock(&tcp_probe.lock);
102 /* If log fills, just silently drop */ 102 /* If log fills, just silently drop */
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index e9bbff746488..b612acf76183 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -165,9 +165,8 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
165 * every other rtt. 165 * every other rtt.
166 */ 166 */
167 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 167 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
168 if (veno->inc 168 if (veno->inc &&
169 && tp->snd_cwnd < 169 tp->snd_cwnd < tp->snd_cwnd_clamp) {
170 tp->snd_cwnd_clamp) {
171 tp->snd_cwnd++; 170 tp->snd_cwnd++;
172 veno->inc = 0; 171 veno->inc = 0;
173 } else 172 } else
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 66b6821b984e..a0f240358892 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -157,8 +157,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
157 157
158 if (queue > TCP_YEAH_ALPHA || 158 if (queue > TCP_YEAH_ALPHA ||
159 rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) { 159 rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
160 if (queue > TCP_YEAH_ALPHA 160 if (queue > TCP_YEAH_ALPHA &&
161 && tp->snd_cwnd > yeah->reno_count) { 161 tp->snd_cwnd > yeah->reno_count) {
162 u32 reduction = min(queue / TCP_YEAH_GAMMA , 162 u32 reduction = min(queue / TCP_YEAH_GAMMA ,
163 tp->snd_cwnd >> TCP_YEAH_EPSILON); 163 tp->snd_cwnd >> TCP_YEAH_EPSILON);
164 164
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4274c1cc78fd..1f9534846ca9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -136,33 +136,67 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
136 struct hlist_nulls_node *node; 136 struct hlist_nulls_node *node;
137 137
138 sk_nulls_for_each(sk2, node, &hslot->head) 138 sk_nulls_for_each(sk2, node, &hslot->head)
139 if (net_eq(sock_net(sk2), net) && 139 if (net_eq(sock_net(sk2), net) &&
140 sk2 != sk && 140 sk2 != sk &&
141 (bitmap || sk2->sk_hash == num) && 141 (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
142 (!sk2->sk_reuse || !sk->sk_reuse) && 142 (!sk2->sk_reuse || !sk->sk_reuse) &&
143 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if 143 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
144 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 144 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
145 (*saddr_comp)(sk, sk2)) { 145 (*saddr_comp)(sk, sk2)) {
146 if (bitmap) 146 if (bitmap)
147 __set_bit(sk2->sk_hash >> log, bitmap); 147 __set_bit(udp_sk(sk2)->udp_port_hash >> log,
148 bitmap);
148 else 149 else
149 return 1; 150 return 1;
150 } 151 }
151 return 0; 152 return 0;
152} 153}
153 154
155/*
156 * Note: we still hold spinlock of primary hash chain, so no other writer
157 * can insert/delete a socket with local_port == num
158 */
159static int udp_lib_lport_inuse2(struct net *net, __u16 num,
160 struct udp_hslot *hslot2,
161 struct sock *sk,
162 int (*saddr_comp)(const struct sock *sk1,
163 const struct sock *sk2))
164{
165 struct sock *sk2;
166 struct hlist_nulls_node *node;
167 int res = 0;
168
169 spin_lock(&hslot2->lock);
170 udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
171 if (net_eq(sock_net(sk2), net) &&
172 sk2 != sk &&
173 (udp_sk(sk2)->udp_port_hash == num) &&
174 (!sk2->sk_reuse || !sk->sk_reuse) &&
175 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
176 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
177 (*saddr_comp)(sk, sk2)) {
178 res = 1;
179 break;
180 }
181 spin_unlock(&hslot2->lock);
182 return res;
183}
184
154/** 185/**
155 * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 186 * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
156 * 187 *
157 * @sk: socket struct in question 188 * @sk: socket struct in question
158 * @snum: port number to look up 189 * @snum: port number to look up
159 * @saddr_comp: AF-dependent comparison of bound local IP addresses 190 * @saddr_comp: AF-dependent comparison of bound local IP addresses
191 * @hash2_nulladdr: AF-dependant hash value in secondary hash chains,
192 * with NULL address
160 */ 193 */
161int udp_lib_get_port(struct sock *sk, unsigned short snum, 194int udp_lib_get_port(struct sock *sk, unsigned short snum,
162 int (*saddr_comp)(const struct sock *sk1, 195 int (*saddr_comp)(const struct sock *sk1,
163 const struct sock *sk2)) 196 const struct sock *sk2),
197 unsigned int hash2_nulladdr)
164{ 198{
165 struct udp_hslot *hslot; 199 struct udp_hslot *hslot, *hslot2;
166 struct udp_table *udptable = sk->sk_prot->h.udp_table; 200 struct udp_table *udptable = sk->sk_prot->h.udp_table;
167 int error = 1; 201 int error = 1;
168 struct net *net = sock_net(sk); 202 struct net *net = sock_net(sk);
@@ -209,16 +243,49 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
209 } else { 243 } else {
210 hslot = udp_hashslot(udptable, net, snum); 244 hslot = udp_hashslot(udptable, net, snum);
211 spin_lock_bh(&hslot->lock); 245 spin_lock_bh(&hslot->lock);
246 if (hslot->count > 10) {
247 int exist;
248 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
249
250 slot2 &= udptable->mask;
251 hash2_nulladdr &= udptable->mask;
252
253 hslot2 = udp_hashslot2(udptable, slot2);
254 if (hslot->count < hslot2->count)
255 goto scan_primary_hash;
256
257 exist = udp_lib_lport_inuse2(net, snum, hslot2,
258 sk, saddr_comp);
259 if (!exist && (hash2_nulladdr != slot2)) {
260 hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
261 exist = udp_lib_lport_inuse2(net, snum, hslot2,
262 sk, saddr_comp);
263 }
264 if (exist)
265 goto fail_unlock;
266 else
267 goto found;
268 }
269scan_primary_hash:
212 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 270 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
213 saddr_comp, 0)) 271 saddr_comp, 0))
214 goto fail_unlock; 272 goto fail_unlock;
215 } 273 }
216found: 274found:
217 inet_sk(sk)->inet_num = snum; 275 inet_sk(sk)->inet_num = snum;
218 sk->sk_hash = snum; 276 udp_sk(sk)->udp_port_hash = snum;
277 udp_sk(sk)->udp_portaddr_hash ^= snum;
219 if (sk_unhashed(sk)) { 278 if (sk_unhashed(sk)) {
220 sk_nulls_add_node_rcu(sk, &hslot->head); 279 sk_nulls_add_node_rcu(sk, &hslot->head);
280 hslot->count++;
221 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 281 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
282
283 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
284 spin_lock(&hslot2->lock);
285 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
286 &hslot2->head);
287 hslot2->count++;
288 spin_unlock(&hslot2->lock);
222 } 289 }
223 error = 0; 290 error = 0;
224fail_unlock: 291fail_unlock:
@@ -237,9 +304,22 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
237 inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); 304 inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
238} 305}
239 306
307static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
308 unsigned int port)
309{
310 return jhash_1word(saddr, net_hash_mix(net)) ^ port;
311}
312
240int udp_v4_get_port(struct sock *sk, unsigned short snum) 313int udp_v4_get_port(struct sock *sk, unsigned short snum)
241{ 314{
242 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); 315 unsigned int hash2_nulladdr =
316 udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
317 unsigned int hash2_partial =
318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
319
320 /* precompute partial secondary hash */
321 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
322 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
243} 323}
244 324
245static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, 325static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
@@ -248,7 +328,7 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
248{ 328{
249 int score = -1; 329 int score = -1;
250 330
251 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 331 if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
252 !ipv6_only_sock(sk)) { 332 !ipv6_only_sock(sk)) {
253 struct inet_sock *inet = inet_sk(sk); 333 struct inet_sock *inet = inet_sk(sk);
254 334
@@ -277,6 +357,89 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
277 return score; 357 return score;
278} 358}
279 359
360/*
361 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
362 */
363#define SCORE2_MAX (1 + 2 + 2 + 2)
364static inline int compute_score2(struct sock *sk, struct net *net,
365 __be32 saddr, __be16 sport,
366 __be32 daddr, unsigned int hnum, int dif)
367{
368 int score = -1;
369
370 if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
371 struct inet_sock *inet = inet_sk(sk);
372
373 if (inet->inet_rcv_saddr != daddr)
374 return -1;
375 if (inet->inet_num != hnum)
376 return -1;
377
378 score = (sk->sk_family == PF_INET ? 1 : 0);
379 if (inet->inet_daddr) {
380 if (inet->inet_daddr != saddr)
381 return -1;
382 score += 2;
383 }
384 if (inet->inet_dport) {
385 if (inet->inet_dport != sport)
386 return -1;
387 score += 2;
388 }
389 if (sk->sk_bound_dev_if) {
390 if (sk->sk_bound_dev_if != dif)
391 return -1;
392 score += 2;
393 }
394 }
395 return score;
396}
397
398
399/* called with rcu_read_lock() */
400static struct sock *udp4_lib_lookup2(struct net *net,
401 __be32 saddr, __be16 sport,
402 __be32 daddr, unsigned int hnum, int dif,
403 struct udp_hslot *hslot2, unsigned int slot2)
404{
405 struct sock *sk, *result;
406 struct hlist_nulls_node *node;
407 int score, badness;
408
409begin:
410 result = NULL;
411 badness = -1;
412 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
413 score = compute_score2(sk, net, saddr, sport,
414 daddr, hnum, dif);
415 if (score > badness) {
416 result = sk;
417 badness = score;
418 if (score == SCORE2_MAX)
419 goto exact_match;
420 }
421 }
422 /*
423 * if the nulls value we got at the end of this lookup is
424 * not the expected one, we must restart lookup.
425 * We probably met an item that was moved to another chain.
426 */
427 if (get_nulls_value(node) != slot2)
428 goto begin;
429
430 if (result) {
431exact_match:
432 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
433 result = NULL;
434 else if (unlikely(compute_score2(result, net, saddr, sport,
435 daddr, hnum, dif) < badness)) {
436 sock_put(result);
437 goto begin;
438 }
439 }
440 return result;
441}
442
280/* UDP nearly always wildcards out the wazoo; it makes no sense to try 443
281 * harder than this. -DaveM 444 * harder than this. -DaveM
282 */ 445 */
@@ -287,11 +450,35 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
287 struct sock *sk, *result; 450 struct sock *sk, *result;
288 struct hlist_nulls_node *node; 451 struct hlist_nulls_node *node;
289 unsigned short hnum = ntohs(dport); 452 unsigned short hnum = ntohs(dport);
290 unsigned int hash = udp_hashfn(net, hnum, udptable->mask); 453 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
291 struct udp_hslot *hslot = &udptable->hash[hash]; 454 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
292 int score, badness; 455 int score, badness;
293 456
294 rcu_read_lock(); 457 rcu_read_lock();
458 if (hslot->count > 10) {
459 hash2 = udp4_portaddr_hash(net, daddr, hnum);
460 slot2 = hash2 & udptable->mask;
461 hslot2 = &udptable->hash2[slot2];
462 if (hslot->count < hslot2->count)
463 goto begin;
464
465 result = udp4_lib_lookup2(net, saddr, sport,
466 daddr, hnum, dif,
467 hslot2, slot2);
468 if (!result) {
469 hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
470 slot2 = hash2 & udptable->mask;
471 hslot2 = &udptable->hash2[slot2];
472 if (hslot->count < hslot2->count)
473 goto begin;
474
475 result = udp4_lib_lookup2(net, INADDR_ANY, sport,
476 daddr, hnum, dif,
477 hslot2, slot2);
478 }
479 rcu_read_unlock();
480 return result;
481 }
295begin: 482begin:
296 result = NULL; 483 result = NULL;
297 badness = -1; 484 badness = -1;
@@ -308,7 +495,7 @@ begin:
308 * not the expected one, we must restart lookup. 495 * not the expected one, we must restart lookup.
309 * We probably met an item that was moved to another chain. 496 * We probably met an item that was moved to another chain.
310 */ 497 */
311 if (get_nulls_value(node) != hash) 498 if (get_nulls_value(node) != slot)
312 goto begin; 499 goto begin;
313 500
314 if (result) { 501 if (result) {
@@ -358,13 +545,13 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
358 sk_nulls_for_each_from(s, node) { 545 sk_nulls_for_each_from(s, node) {
359 struct inet_sock *inet = inet_sk(s); 546 struct inet_sock *inet = inet_sk(s);
360 547
361 if (!net_eq(sock_net(s), net) || 548 if (!net_eq(sock_net(s), net) ||
362 s->sk_hash != hnum || 549 udp_sk(s)->udp_port_hash != hnum ||
363 (inet->inet_daddr && inet->inet_daddr != rmt_addr) || 550 (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
364 (inet->inet_dport != rmt_port && inet->inet_dport) || 551 (inet->inet_dport != rmt_port && inet->inet_dport) ||
365 (inet->inet_rcv_saddr && 552 (inet->inet_rcv_saddr &&
366 inet->inet_rcv_saddr != loc_addr) || 553 inet->inet_rcv_saddr != loc_addr) ||
367 ipv6_only_sock(s) || 554 ipv6_only_sock(s) ||
368 (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) 555 (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
369 continue; 556 continue;
370 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) 557 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
@@ -1005,9 +1192,7 @@ try_again:
1005 err = ulen; 1192 err = ulen;
1006 1193
1007out_free: 1194out_free:
1008 lock_sock(sk); 1195 skb_free_datagram_locked(sk, skb);
1009 skb_free_datagram(sk, skb);
1010 release_sock(sk);
1011out: 1196out:
1012 return err; 1197 return err;
1013 1198
@@ -1050,13 +1235,22 @@ void udp_lib_unhash(struct sock *sk)
1050{ 1235{
1051 if (sk_hashed(sk)) { 1236 if (sk_hashed(sk)) {
1052 struct udp_table *udptable = sk->sk_prot->h.udp_table; 1237 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1053 struct udp_hslot *hslot = udp_hashslot(udptable, sock_net(sk), 1238 struct udp_hslot *hslot, *hslot2;
1054 sk->sk_hash); 1239
1240 hslot = udp_hashslot(udptable, sock_net(sk),
1241 udp_sk(sk)->udp_port_hash);
1242 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1055 1243
1056 spin_lock_bh(&hslot->lock); 1244 spin_lock_bh(&hslot->lock);
1057 if (sk_nulls_del_node_init_rcu(sk)) { 1245 if (sk_nulls_del_node_init_rcu(sk)) {
1246 hslot->count--;
1058 inet_sk(sk)->inet_num = 0; 1247 inet_sk(sk)->inet_num = 0;
1059 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 1248 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
1249
1250 spin_lock(&hslot2->lock);
1251 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1252 hslot2->count--;
1253 spin_unlock(&hslot2->lock);
1060 } 1254 }
1061 spin_unlock_bh(&hslot->lock); 1255 spin_unlock_bh(&hslot->lock);
1062 } 1256 }
@@ -1192,49 +1386,83 @@ drop:
1192 return -1; 1386 return -1;
1193} 1387}
1194 1388
1389
1390static void flush_stack(struct sock **stack, unsigned int count,
1391 struct sk_buff *skb, unsigned int final)
1392{
1393 unsigned int i;
1394 struct sk_buff *skb1 = NULL;
1395 struct sock *sk;
1396
1397 for (i = 0; i < count; i++) {
1398 sk = stack[i];
1399 if (likely(skb1 == NULL))
1400 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
1401
1402 if (!skb1) {
1403 atomic_inc(&sk->sk_drops);
1404 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1405 IS_UDPLITE(sk));
1406 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1407 IS_UDPLITE(sk));
1408 }
1409
1410 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
1411 skb1 = NULL;
1412 }
1413 if (unlikely(skb1))
1414 kfree_skb(skb1);
1415}
1416
1195/* 1417/*
1196 * Multicasts and broadcasts go to each listener. 1418 * Multicasts and broadcasts go to each listener.
1197 * 1419 *
1198 * Note: called only from the BH handler context, 1420 * Note: called only from the BH handler context.
1199 * so we don't need to lock the hashes.
1200 */ 1421 */
1201static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 1422static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1202 struct udphdr *uh, 1423 struct udphdr *uh,
1203 __be32 saddr, __be32 daddr, 1424 __be32 saddr, __be32 daddr,
1204 struct udp_table *udptable) 1425 struct udp_table *udptable)
1205{ 1426{
1206 struct sock *sk; 1427 struct sock *sk, *stack[256 / sizeof(struct sock *)];
1207 struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); 1428 struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
1208 int dif; 1429 int dif;
1430 unsigned int i, count = 0;
1209 1431
1210 spin_lock(&hslot->lock); 1432 spin_lock(&hslot->lock);
1211 sk = sk_nulls_head(&hslot->head); 1433 sk = sk_nulls_head(&hslot->head);
1212 dif = skb->dev->ifindex; 1434 dif = skb->dev->ifindex;
1213 sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); 1435 sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
1214 if (sk) { 1436 while (sk) {
1215 struct sock *sknext = NULL; 1437 stack[count++] = sk;
1216 1438 sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
1217 do { 1439 daddr, uh->source, saddr, dif);
1218 struct sk_buff *skb1 = skb; 1440 if (unlikely(count == ARRAY_SIZE(stack))) {
1219 1441 if (!sk)
1220 sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, 1442 break;
1221 daddr, uh->source, saddr, 1443 flush_stack(stack, count, skb, ~0);
1222 dif); 1444 count = 0;
1223 if (sknext) 1445 }
1224 skb1 = skb_clone(skb, GFP_ATOMIC); 1446 }
1225 1447 /*
1226 if (skb1) { 1448 * before releasing chain lock, we must take a reference on sockets
1227 int ret = udp_queue_rcv_skb(sk, skb1); 1449 */
1228 if (ret > 0) 1450 for (i = 0; i < count; i++)
1229 /* we should probably re-process instead 1451 sock_hold(stack[i]);
1230 * of dropping packets here. */ 1452
1231 kfree_skb(skb1);
1232 }
1233 sk = sknext;
1234 } while (sknext);
1235 } else
1236 consume_skb(skb);
1237 spin_unlock(&hslot->lock); 1453 spin_unlock(&hslot->lock);
1454
1455 /*
1456 * do the slow work with no lock held
1457 */
1458 if (count) {
1459 flush_stack(stack, count, skb, count - 1);
1460
1461 for (i = 0; i < count; i++)
1462 sock_put(stack[i]);
1463 } else {
1464 kfree_skb(skb);
1465 }
1238 return 0; 1466 return 0;
1239} 1467}
1240 1468
@@ -1844,7 +2072,7 @@ void __init udp_table_init(struct udp_table *table, const char *name)
1844 2072
1845 if (!CONFIG_BASE_SMALL) 2073 if (!CONFIG_BASE_SMALL)
1846 table->hash = alloc_large_system_hash(name, 2074 table->hash = alloc_large_system_hash(name,
1847 sizeof(struct udp_hslot), 2075 2 * sizeof(struct udp_hslot),
1848 uhash_entries, 2076 uhash_entries,
1849 21, /* one slot per 2 MB */ 2077 21, /* one slot per 2 MB */
1850 0, 2078 0,
@@ -1856,16 +2084,23 @@ void __init udp_table_init(struct udp_table *table, const char *name)
1856 */ 2084 */
1857 if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) { 2085 if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
1858 table->hash = kmalloc(UDP_HTABLE_SIZE_MIN * 2086 table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
1859 sizeof(struct udp_hslot), GFP_KERNEL); 2087 2 * sizeof(struct udp_hslot), GFP_KERNEL);
1860 if (!table->hash) 2088 if (!table->hash)
1861 panic(name); 2089 panic(name);
1862 table->log = ilog2(UDP_HTABLE_SIZE_MIN); 2090 table->log = ilog2(UDP_HTABLE_SIZE_MIN);
1863 table->mask = UDP_HTABLE_SIZE_MIN - 1; 2091 table->mask = UDP_HTABLE_SIZE_MIN - 1;
1864 } 2092 }
2093 table->hash2 = table->hash + (table->mask + 1);
1865 for (i = 0; i <= table->mask; i++) { 2094 for (i = 0; i <= table->mask; i++) {
1866 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); 2095 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
2096 table->hash[i].count = 0;
1867 spin_lock_init(&table->hash[i].lock); 2097 spin_lock_init(&table->hash[i].lock);
1868 } 2098 }
2099 for (i = 0; i <= table->mask; i++) {
2100 INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
2101 table->hash2[i].count = 0;
2102 spin_lock_init(&table->hash2[i].lock);
2103 }
1869} 2104}
1870 2105
1871void __init udp_init(void) 2106void __init udp_init(void)
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 470c504b9554..66f79513f4a5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -64,7 +64,6 @@ static struct inet_protosw udplite4_protosw = {
64 .protocol = IPPROTO_UDPLITE, 64 .protocol = IPPROTO_UDPLITE,
65 .prot = &udplite_prot, 65 .prot = &udplite_prot,
66 .ops = &inet_dgram_ops, 66 .ops = &inet_dgram_ops,
67 .capability = -1,
68 .no_check = 0, /* must checksum (RFC 3828) */ 67 .no_check = 0, /* must checksum (RFC 3828) */
69 .flags = INET_PROTOSW_PERMANENT, 68 .flags = INET_PROTOSW_PERMANENT,
70}; 69};
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 918648409612..b1ce8fc62049 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -481,9 +481,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
481 struct net_device *dev; 481 struct net_device *dev;
482 struct inet6_dev *idev; 482 struct inet6_dev *idev;
483 483
484 read_lock(&dev_base_lock); 484 rcu_read_lock();
485 for_each_netdev(net, dev) { 485 for_each_netdev_rcu(net, dev) {
486 rcu_read_lock();
487 idev = __in6_dev_get(dev); 486 idev = __in6_dev_get(dev);
488 if (idev) { 487 if (idev) {
489 int changed = (!idev->cnf.forwarding) ^ (!newf); 488 int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -491,9 +490,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
491 if (changed) 490 if (changed)
492 dev_forward_change(idev); 491 dev_forward_change(idev);
493 } 492 }
494 rcu_read_unlock();
495 } 493 }
496 read_unlock(&dev_base_lock); 494 rcu_read_unlock();
497} 495}
498 496
499static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) 497static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
@@ -1137,10 +1135,9 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
1137 hiscore->rule = -1; 1135 hiscore->rule = -1;
1138 hiscore->ifa = NULL; 1136 hiscore->ifa = NULL;
1139 1137
1140 read_lock(&dev_base_lock);
1141 rcu_read_lock(); 1138 rcu_read_lock();
1142 1139
1143 for_each_netdev(net, dev) { 1140 for_each_netdev_rcu(net, dev) {
1144 struct inet6_dev *idev; 1141 struct inet6_dev *idev;
1145 1142
1146 /* Candidate Source Address (section 4) 1143 /* Candidate Source Address (section 4)
@@ -1235,7 +1232,6 @@ try_nextdev:
1235 read_unlock_bh(&idev->lock); 1232 read_unlock_bh(&idev->lock);
1236 } 1233 }
1237 rcu_read_unlock(); 1234 rcu_read_unlock();
1238 read_unlock(&dev_base_lock);
1239 1235
1240 if (!hiscore->ifa) 1236 if (!hiscore->ifa)
1241 return -EADDRNOTAVAIL; 1237 return -EADDRNOTAVAIL;
@@ -3485,85 +3481,114 @@ enum addr_type_t
3485 ANYCAST_ADDR, 3481 ANYCAST_ADDR,
3486}; 3482};
3487 3483
3484/* called with rcu_read_lock() */
3485static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3486 struct netlink_callback *cb, enum addr_type_t type,
3487 int s_ip_idx, int *p_ip_idx)
3488{
3489 struct inet6_ifaddr *ifa;
3490 struct ifmcaddr6 *ifmca;
3491 struct ifacaddr6 *ifaca;
3492 int err = 1;
3493 int ip_idx = *p_ip_idx;
3494
3495 read_lock_bh(&idev->lock);
3496 switch (type) {
3497 case UNICAST_ADDR:
3498 /* unicast address incl. temp addr */
3499 for (ifa = idev->addr_list; ifa;
3500 ifa = ifa->if_next, ip_idx++) {
3501 if (ip_idx < s_ip_idx)
3502 continue;
3503 err = inet6_fill_ifaddr(skb, ifa,
3504 NETLINK_CB(cb->skb).pid,
3505 cb->nlh->nlmsg_seq,
3506 RTM_NEWADDR,
3507 NLM_F_MULTI);
3508 if (err <= 0)
3509 break;
3510 }
3511 break;
3512 case MULTICAST_ADDR:
3513 /* multicast address */
3514 for (ifmca = idev->mc_list; ifmca;
3515 ifmca = ifmca->next, ip_idx++) {
3516 if (ip_idx < s_ip_idx)
3517 continue;
3518 err = inet6_fill_ifmcaddr(skb, ifmca,
3519 NETLINK_CB(cb->skb).pid,
3520 cb->nlh->nlmsg_seq,
3521 RTM_GETMULTICAST,
3522 NLM_F_MULTI);
3523 if (err <= 0)
3524 break;
3525 }
3526 break;
3527 case ANYCAST_ADDR:
3528 /* anycast address */
3529 for (ifaca = idev->ac_list; ifaca;
3530 ifaca = ifaca->aca_next, ip_idx++) {
3531 if (ip_idx < s_ip_idx)
3532 continue;
3533 err = inet6_fill_ifacaddr(skb, ifaca,
3534 NETLINK_CB(cb->skb).pid,
3535 cb->nlh->nlmsg_seq,
3536 RTM_GETANYCAST,
3537 NLM_F_MULTI);
3538 if (err <= 0)
3539 break;
3540 }
3541 break;
3542 default:
3543 break;
3544 }
3545 read_unlock_bh(&idev->lock);
3546 *p_ip_idx = ip_idx;
3547 return err;
3548}
3549
3488static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, 3550static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3489 enum addr_type_t type) 3551 enum addr_type_t type)
3490{ 3552{
3553 struct net *net = sock_net(skb->sk);
3554 int h, s_h;
3491 int idx, ip_idx; 3555 int idx, ip_idx;
3492 int s_idx, s_ip_idx; 3556 int s_idx, s_ip_idx;
3493 int err = 1;
3494 struct net_device *dev; 3557 struct net_device *dev;
3495 struct inet6_dev *idev = NULL; 3558 struct inet6_dev *idev;
3496 struct inet6_ifaddr *ifa; 3559 struct hlist_head *head;
3497 struct ifmcaddr6 *ifmca; 3560 struct hlist_node *node;
3498 struct ifacaddr6 *ifaca;
3499 struct net *net = sock_net(skb->sk);
3500
3501 s_idx = cb->args[0];
3502 s_ip_idx = ip_idx = cb->args[1];
3503 3561
3504 idx = 0; 3562 s_h = cb->args[0];
3505 for_each_netdev(net, dev) { 3563 s_idx = idx = cb->args[1];
3506 if (idx < s_idx) 3564 s_ip_idx = ip_idx = cb->args[2];
3507 goto cont;
3508 if (idx > s_idx)
3509 s_ip_idx = 0;
3510 ip_idx = 0;
3511 if ((idev = in6_dev_get(dev)) == NULL)
3512 goto cont;
3513 read_lock_bh(&idev->lock);
3514 switch (type) {
3515 case UNICAST_ADDR:
3516 /* unicast address incl. temp addr */
3517 for (ifa = idev->addr_list; ifa;
3518 ifa = ifa->if_next, ip_idx++) {
3519 if (ip_idx < s_ip_idx)
3520 continue;
3521 err = inet6_fill_ifaddr(skb, ifa,
3522 NETLINK_CB(cb->skb).pid,
3523 cb->nlh->nlmsg_seq,
3524 RTM_NEWADDR,
3525 NLM_F_MULTI);
3526 }
3527 break;
3528 case MULTICAST_ADDR:
3529 /* multicast address */
3530 for (ifmca = idev->mc_list; ifmca;
3531 ifmca = ifmca->next, ip_idx++) {
3532 if (ip_idx < s_ip_idx)
3533 continue;
3534 err = inet6_fill_ifmcaddr(skb, ifmca,
3535 NETLINK_CB(cb->skb).pid,
3536 cb->nlh->nlmsg_seq,
3537 RTM_GETMULTICAST,
3538 NLM_F_MULTI);
3539 }
3540 break;
3541 case ANYCAST_ADDR:
3542 /* anycast address */
3543 for (ifaca = idev->ac_list; ifaca;
3544 ifaca = ifaca->aca_next, ip_idx++) {
3545 if (ip_idx < s_ip_idx)
3546 continue;
3547 err = inet6_fill_ifacaddr(skb, ifaca,
3548 NETLINK_CB(cb->skb).pid,
3549 cb->nlh->nlmsg_seq,
3550 RTM_GETANYCAST,
3551 NLM_F_MULTI);
3552 }
3553 break;
3554 default:
3555 break;
3556 }
3557 read_unlock_bh(&idev->lock);
3558 in6_dev_put(idev);
3559 3565
3560 if (err <= 0) 3566 rcu_read_lock();
3561 break; 3567 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3568 idx = 0;
3569 head = &net->dev_index_head[h];
3570 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3571 if (idx < s_idx)
3572 goto cont;
3573 if (idx > s_idx)
3574 s_ip_idx = 0;
3575 ip_idx = 0;
3576 if ((idev = __in6_dev_get(dev)) == NULL)
3577 goto cont;
3578
3579 if (in6_dump_addrs(idev, skb, cb, type,
3580 s_ip_idx, &ip_idx) <= 0)
3581 goto done;
3562cont: 3582cont:
3563 idx++; 3583 idx++;
3584 }
3564 } 3585 }
3565 cb->args[0] = idx; 3586done:
3566 cb->args[1] = ip_idx; 3587 rcu_read_unlock();
3588 cb->args[0] = h;
3589 cb->args[1] = idx;
3590 cb->args[2] = ip_idx;
3591
3567 return skb->len; 3592 return skb->len;
3568} 3593}
3569 3594
@@ -3827,28 +3852,39 @@ nla_put_failure:
3827static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 3852static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3828{ 3853{
3829 struct net *net = sock_net(skb->sk); 3854 struct net *net = sock_net(skb->sk);
3830 int idx, err; 3855 int h, s_h;
3831 int s_idx = cb->args[0]; 3856 int idx = 0, s_idx;
3832 struct net_device *dev; 3857 struct net_device *dev;
3833 struct inet6_dev *idev; 3858 struct inet6_dev *idev;
3859 struct hlist_head *head;
3860 struct hlist_node *node;
3834 3861
3835 read_lock(&dev_base_lock); 3862 s_h = cb->args[0];
3836 idx = 0; 3863 s_idx = cb->args[1];
3837 for_each_netdev(net, dev) { 3864
3838 if (idx < s_idx) 3865 rcu_read_lock();
3839 goto cont; 3866 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3840 if ((idev = in6_dev_get(dev)) == NULL) 3867 idx = 0;
3841 goto cont; 3868 head = &net->dev_index_head[h];
3842 err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, 3869 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3843 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); 3870 if (idx < s_idx)
3844 in6_dev_put(idev); 3871 goto cont;
3845 if (err <= 0) 3872 idev = __in6_dev_get(dev);
3846 break; 3873 if (!idev)
3874 goto cont;
3875 if (inet6_fill_ifinfo(skb, idev,
3876 NETLINK_CB(cb->skb).pid,
3877 cb->nlh->nlmsg_seq,
3878 RTM_NEWLINK, NLM_F_MULTI) <= 0)
3879 goto out;
3847cont: 3880cont:
3848 idx++; 3881 idx++;
3882 }
3849 } 3883 }
3850 read_unlock(&dev_base_lock); 3884out:
3851 cb->args[0] = idx; 3885 rcu_read_unlock();
3886 cb->args[1] = idx;
3887 cb->args[0] = h;
3852 3888
3853 return skb->len; 3889 return skb->len;
3854} 3890}
@@ -4052,9 +4088,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
4052 struct net_device *dev; 4088 struct net_device *dev;
4053 struct inet6_dev *idev; 4089 struct inet6_dev *idev;
4054 4090
4055 read_lock(&dev_base_lock); 4091 rcu_read_lock();
4056 for_each_netdev(net, dev) { 4092 for_each_netdev_rcu(net, dev) {
4057 rcu_read_lock();
4058 idev = __in6_dev_get(dev); 4093 idev = __in6_dev_get(dev);
4059 if (idev) { 4094 if (idev) {
4060 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 4095 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4062,9 +4097,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
4062 if (changed) 4097 if (changed)
4063 dev_disable_change(idev); 4098 dev_disable_change(idev);
4064 } 4099 }
4065 rcu_read_unlock();
4066 } 4100 }
4067 read_unlock(&dev_base_lock); 4101 rcu_read_unlock();
4068} 4102}
4069 4103
4070static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) 4104static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
@@ -4464,7 +4498,7 @@ static int addrconf_init_net(struct net *net)
4464 all = &ipv6_devconf; 4498 all = &ipv6_devconf;
4465 dflt = &ipv6_devconf_dflt; 4499 dflt = &ipv6_devconf_dflt;
4466 4500
4467 if (net != &init_net) { 4501 if (!net_eq(net, &init_net)) {
4468 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); 4502 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
4469 if (all == NULL) 4503 if (all == NULL)
4470 goto err_alloc_all; 4504 goto err_alloc_all;
@@ -4512,7 +4546,7 @@ static void addrconf_exit_net(struct net *net)
4512 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4546 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
4513 __addrconf_sysctl_unregister(net->ipv6.devconf_all); 4547 __addrconf_sysctl_unregister(net->ipv6.devconf_all);
4514#endif 4548#endif
4515 if (net != &init_net) { 4549 if (!net_eq(net, &init_net)) {
4516 kfree(net->ipv6.devconf_dflt); 4550 kfree(net->ipv6.devconf_dflt);
4517 kfree(net->ipv6.devconf_all); 4551 kfree(net->ipv6.devconf_all);
4518 } 4552 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b6d058818673..12e69d364dd5 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -95,7 +95,8 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
95 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); 95 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
96} 96}
97 97
98static int inet6_create(struct net *net, struct socket *sock, int protocol) 98static int inet6_create(struct net *net, struct socket *sock, int protocol,
99 int kern)
99{ 100{
100 struct inet_sock *inet; 101 struct inet_sock *inet;
101 struct ipv6_pinfo *np; 102 struct ipv6_pinfo *np;
@@ -158,7 +159,7 @@ lookup_protocol:
158 } 159 }
159 160
160 err = -EPERM; 161 err = -EPERM;
161 if (answer->capability > 0 && !capable(answer->capability)) 162 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
162 goto out_rcu_unlock; 163 goto out_rcu_unlock;
163 164
164 sock->ops = answer->ops; 165 sock->ops = answer->ops;
@@ -314,6 +315,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
314 if (addr_type != IPV6_ADDR_ANY) { 315 if (addr_type != IPV6_ADDR_ANY) {
315 struct net_device *dev = NULL; 316 struct net_device *dev = NULL;
316 317
318 rcu_read_lock();
317 if (addr_type & IPV6_ADDR_LINKLOCAL) { 319 if (addr_type & IPV6_ADDR_LINKLOCAL) {
318 if (addr_len >= sizeof(struct sockaddr_in6) && 320 if (addr_len >= sizeof(struct sockaddr_in6) &&
319 addr->sin6_scope_id) { 321 addr->sin6_scope_id) {
@@ -326,12 +328,12 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
326 /* Binding to link-local address requires an interface */ 328 /* Binding to link-local address requires an interface */
327 if (!sk->sk_bound_dev_if) { 329 if (!sk->sk_bound_dev_if) {
328 err = -EINVAL; 330 err = -EINVAL;
329 goto out; 331 goto out_unlock;
330 } 332 }
331 dev = dev_get_by_index(net, sk->sk_bound_dev_if); 333 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
332 if (!dev) { 334 if (!dev) {
333 err = -ENODEV; 335 err = -ENODEV;
334 goto out; 336 goto out_unlock;
335 } 337 }
336 } 338 }
337 339
@@ -342,14 +344,11 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
342 if (!(addr_type & IPV6_ADDR_MULTICAST)) { 344 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
343 if (!ipv6_chk_addr(net, &addr->sin6_addr, 345 if (!ipv6_chk_addr(net, &addr->sin6_addr,
344 dev, 0)) { 346 dev, 0)) {
345 if (dev)
346 dev_put(dev);
347 err = -EADDRNOTAVAIL; 347 err = -EADDRNOTAVAIL;
348 goto out; 348 goto out_unlock;
349 } 349 }
350 } 350 }
351 if (dev) 351 rcu_read_unlock();
352 dev_put(dev);
353 } 352 }
354 } 353 }
355 354
@@ -381,6 +380,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
381out: 380out:
382 release_sock(sk); 381 release_sock(sk);
383 return err; 382 return err;
383out_unlock:
384 rcu_read_unlock();
385 goto out;
384} 386}
385 387
386EXPORT_SYMBOL(inet6_bind); 388EXPORT_SYMBOL(inet6_bind);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0f526f8ea518..c2f300c314be 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -667,7 +667,7 @@ static int ah6_init_state(struct xfrm_state *x)
667 } 667 }
668 668
669 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 669 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
670 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 670 ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
671 671
672 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 672 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
673 673
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 1ae58bec1de0..f1c74c8ef9de 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -404,13 +404,13 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
404 404
405 if (dev) 405 if (dev)
406 return ipv6_chk_acast_dev(dev, addr); 406 return ipv6_chk_acast_dev(dev, addr);
407 read_lock(&dev_base_lock); 407 rcu_read_lock();
408 for_each_netdev(net, dev) 408 for_each_netdev_rcu(net, dev)
409 if (ipv6_chk_acast_dev(dev, addr)) { 409 if (ipv6_chk_acast_dev(dev, addr)) {
410 found = 1; 410 found = 1;
411 break; 411 break;
412 } 412 }
413 read_unlock(&dev_base_lock); 413 rcu_read_unlock();
414 return found; 414 return found;
415} 415}
416 416
@@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
431 struct net *net = seq_file_net(seq); 431 struct net *net = seq_file_net(seq);
432 432
433 state->idev = NULL; 433 state->idev = NULL;
434 for_each_netdev(net, state->dev) { 434 for_each_netdev_rcu(net, state->dev) {
435 struct inet6_dev *idev; 435 struct inet6_dev *idev;
436 idev = in6_dev_get(state->dev); 436 idev = __in6_dev_get(state->dev);
437 if (!idev) 437 if (!idev)
438 continue; 438 continue;
439 read_lock_bh(&idev->lock); 439 read_lock_bh(&idev->lock);
@@ -443,7 +443,6 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
443 break; 443 break;
444 } 444 }
445 read_unlock_bh(&idev->lock); 445 read_unlock_bh(&idev->lock);
446 in6_dev_put(idev);
447 } 446 }
448 return im; 447 return im;
449} 448}
@@ -454,16 +453,15 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im
454 453
455 im = im->aca_next; 454 im = im->aca_next;
456 while (!im) { 455 while (!im) {
457 if (likely(state->idev != NULL)) { 456 if (likely(state->idev != NULL))
458 read_unlock_bh(&state->idev->lock); 457 read_unlock_bh(&state->idev->lock);
459 in6_dev_put(state->idev); 458
460 } 459 state->dev = next_net_device_rcu(state->dev);
461 state->dev = next_net_device(state->dev);
462 if (!state->dev) { 460 if (!state->dev) {
463 state->idev = NULL; 461 state->idev = NULL;
464 break; 462 break;
465 } 463 }
466 state->idev = in6_dev_get(state->dev); 464 state->idev = __in6_dev_get(state->dev);
467 if (!state->idev) 465 if (!state->idev)
468 continue; 466 continue;
469 read_lock_bh(&state->idev->lock); 467 read_lock_bh(&state->idev->lock);
@@ -482,29 +480,30 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos)
482} 480}
483 481
484static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) 482static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
485 __acquires(dev_base_lock) 483 __acquires(RCU)
486{ 484{
487 read_lock(&dev_base_lock); 485 rcu_read_lock();
488 return ac6_get_idx(seq, *pos); 486 return ac6_get_idx(seq, *pos);
489} 487}
490 488
491static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) 489static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
492{ 490{
493 struct ifacaddr6 *im; 491 struct ifacaddr6 *im = ac6_get_next(seq, v);
494 im = ac6_get_next(seq, v); 492
495 ++*pos; 493 ++*pos;
496 return im; 494 return im;
497} 495}
498 496
499static void ac6_seq_stop(struct seq_file *seq, void *v) 497static void ac6_seq_stop(struct seq_file *seq, void *v)
500 __releases(dev_base_lock) 498 __releases(RCU)
501{ 499{
502 struct ac6_iter_state *state = ac6_seq_private(seq); 500 struct ac6_iter_state *state = ac6_seq_private(seq);
501
503 if (likely(state->idev != NULL)) { 502 if (likely(state->idev != NULL)) {
504 read_unlock_bh(&state->idev->lock); 503 read_unlock_bh(&state->idev->lock);
505 in6_dev_put(state->idev); 504 state->idev = NULL;
506 } 505 }
507 read_unlock(&dev_base_lock); 506 rcu_read_unlock();
508} 507}
509 508
510static int ac6_seq_show(struct seq_file *seq, void *v) 509static int ac6_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9f70452a69e7..e6f9cdf780fe 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -537,12 +537,17 @@ int datagram_send_ctl(struct net *net,
537 537
538 addr_type = __ipv6_addr_type(&src_info->ipi6_addr); 538 addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
539 539
540 rcu_read_lock();
540 if (fl->oif) { 541 if (fl->oif) {
541 dev = dev_get_by_index(net, fl->oif); 542 dev = dev_get_by_index_rcu(net, fl->oif);
542 if (!dev) 543 if (!dev) {
544 rcu_read_unlock();
543 return -ENODEV; 545 return -ENODEV;
544 } else if (addr_type & IPV6_ADDR_LINKLOCAL) 546 }
547 } else if (addr_type & IPV6_ADDR_LINKLOCAL) {
548 rcu_read_unlock();
545 return -EINVAL; 549 return -EINVAL;
550 }
546 551
547 if (addr_type != IPV6_ADDR_ANY) { 552 if (addr_type != IPV6_ADDR_ANY) {
548 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 553 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
@@ -553,8 +558,7 @@ int datagram_send_ctl(struct net *net,
553 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); 558 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
554 } 559 }
555 560
556 if (dev) 561 rcu_read_unlock();
557 dev_put(dev);
558 562
559 if (err) 563 if (err)
560 goto exit_f; 564 goto exit_f;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index af597c73ebe9..668a46b655e6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -473,7 +473,7 @@ static int esp_init_authenc(struct xfrm_state *x)
473 } 473 }
474 474
475 err = crypto_aead_setauthsize( 475 err = crypto_aead_setauthsize(
476 aead, aalg_desc->uinfo.auth.icv_truncbits / 8); 476 aead, x->aalg->alg_trunc_len / 8);
477 if (err) 477 if (err)
478 goto free_key; 478 goto free_key;
479 } 479 }
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 00a7a5e4ac97..b7aa7c64cc4a 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -264,44 +264,36 @@ static struct fib_rules_ops fib6_rules_ops_template = {
264 264
265static int fib6_rules_net_init(struct net *net) 265static int fib6_rules_net_init(struct net *net)
266{ 266{
267 struct fib_rules_ops *ops;
267 int err = -ENOMEM; 268 int err = -ENOMEM;
268 269
269 net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template, 270 ops = fib_rules_register(&fib6_rules_ops_template, net);
270 sizeof(*net->ipv6.fib6_rules_ops), 271 if (IS_ERR(ops))
271 GFP_KERNEL); 272 return PTR_ERR(ops);
272 if (!net->ipv6.fib6_rules_ops) 273 net->ipv6.fib6_rules_ops = ops;
273 goto out;
274 274
275 net->ipv6.fib6_rules_ops->fro_net = net;
276 INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list);
277 275
278 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0, 276 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
279 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); 277 RT6_TABLE_LOCAL, 0);
280 if (err) 278 if (err)
281 goto out_fib6_rules_ops; 279 goto out_fib6_rules_ops;
282 280
283 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 281 err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
284 0x7FFE, RT6_TABLE_MAIN, 0); 282 0x7FFE, RT6_TABLE_MAIN, 0);
285 if (err) 283 if (err)
286 goto out_fib6_default_rule_add; 284 goto out_fib6_rules_ops;
287 285
288 err = fib_rules_register(net->ipv6.fib6_rules_ops);
289 if (err)
290 goto out_fib6_default_rule_add;
291out: 286out:
292 return err; 287 return err;
293 288
294out_fib6_default_rule_add:
295 fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops);
296out_fib6_rules_ops: 289out_fib6_rules_ops:
297 kfree(net->ipv6.fib6_rules_ops); 290 fib_rules_unregister(ops);
298 goto out; 291 goto out;
299} 292}
300 293
301static void fib6_rules_net_exit(struct net *net) 294static void fib6_rules_net_exit(struct net *net)
302{ 295{
303 fib_rules_unregister(net->ipv6.fib6_rules_ops); 296 fib_rules_unregister(net->ipv6.fib6_rules_ops);
304 kfree(net->ipv6.fib6_rules_ops);
305} 297}
306 298
307static struct pernet_operations fib6_rules_net_ops = { 299static struct pernet_operations fib6_rules_net_ops = {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7712578bdc66..6e7bffa2205e 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -67,7 +67,7 @@ static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
67 struct ip6_flowlabel *fl; 67 struct ip6_flowlabel *fl;
68 68
69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) { 69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
70 if (fl->label == label && fl->fl_net == net) 70 if (fl->label == label && net_eq(fl->fl_net, net))
71 return fl; 71 return fl;
72 } 72 }
73 return NULL; 73 return NULL;
@@ -163,7 +163,8 @@ static void ip6_fl_purge(struct net *net)
163 struct ip6_flowlabel *fl, **flp; 163 struct ip6_flowlabel *fl, **flp;
164 flp = &fl_ht[i]; 164 flp = &fl_ht[i];
165 while ((fl = *flp) != NULL) { 165 while ((fl = *flp) != NULL) {
166 if (fl->fl_net == net && atomic_read(&fl->users) == 0) { 166 if (net_eq(fl->fl_net, net) &&
167 atomic_read(&fl->users) == 0) {
167 *flp = fl->next; 168 *flp = fl->next;
168 fl_free(fl); 169 fl_free(fl);
169 atomic_dec(&fl_size); 170 atomic_dec(&fl_size);
@@ -377,8 +378,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
377 goto done; 378 goto done;
378 fl->share = freq->flr_share; 379 fl->share = freq->flr_share;
379 addr_type = ipv6_addr_type(&freq->flr_dst); 380 addr_type = ipv6_addr_type(&freq->flr_dst);
380 if ((addr_type&IPV6_ADDR_MAPPED) 381 if ((addr_type & IPV6_ADDR_MAPPED) ||
381 || addr_type == IPV6_ADDR_ANY) { 382 addr_type == IPV6_ADDR_ANY) {
382 err = -EINVAL; 383 err = -EINVAL;
383 goto done; 384 goto done;
384 } 385 }
@@ -421,8 +422,8 @@ static int mem_check(struct sock *sk)
421 422
422 if (room <= 0 || 423 if (room <= 0 ||
423 ((count >= FL_MAX_PER_SOCK || 424 ((count >= FL_MAX_PER_SOCK ||
424 (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) 425 (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
425 && !capable(CAP_NET_ADMIN))) 426 !capable(CAP_NET_ADMIN)))
426 return -ENOBUFS; 427 return -ENOBUFS;
427 428
428 return 0; 429 return 0;
@@ -630,7 +631,7 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
630 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { 631 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
631 fl = fl_ht[state->bucket]; 632 fl = fl_ht[state->bucket];
632 633
633 while (fl && fl->fl_net != net) 634 while (fl && !net_eq(fl->fl_net, net))
634 fl = fl->next; 635 fl = fl->next;
635 if (fl) 636 if (fl)
636 break; 637 break;
@@ -645,7 +646,7 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
645 646
646 fl = fl->next; 647 fl = fl->next;
647try_again: 648try_again:
648 while (fl && fl->fl_net != net) 649 while (fl && !net_eq(fl->fl_net, net))
649 fl = fl->next; 650 fl = fl->next;
650 651
651 while (!fl) { 652 while (!fl) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6c1b5c98e818..d453d07b0dfe 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 78static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 79static void ip6_tnl_dev_setup(struct net_device *dev);
80 80
81static int ip6_tnl_net_id; 81static int ip6_tnl_net_id __read_mostly;
82struct ip6_tnl_net { 82struct ip6_tnl_net {
83 /* the IPv6 tunnel fallback device */ 83 /* the IPv6 tunnel fallback device */
84 struct net_device *fb_tnl_dev; 84 struct net_device *fb_tnl_dev;
@@ -658,6 +658,7 @@ static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
658 IP6_ECN_set_ce(ipv6_hdr(skb)); 658 IP6_ECN_set_ce(ipv6_hdr(skb));
659} 659}
660 660
661/* called with rcu_read_lock() */
661static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) 662static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
662{ 663{
663 struct ip6_tnl_parm *p = &t->parms; 664 struct ip6_tnl_parm *p = &t->parms;
@@ -668,15 +669,13 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
668 struct net_device *ldev = NULL; 669 struct net_device *ldev = NULL;
669 670
670 if (p->link) 671 if (p->link)
671 ldev = dev_get_by_index(net, p->link); 672 ldev = dev_get_by_index_rcu(net, p->link);
672 673
673 if ((ipv6_addr_is_multicast(&p->laddr) || 674 if ((ipv6_addr_is_multicast(&p->laddr) ||
674 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && 675 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
675 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) 676 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
676 ret = 1; 677 ret = 1;
677 678
678 if (ldev)
679 dev_put(ldev);
680 } 679 }
681 return ret; 680 return ret;
682} 681}
@@ -804,8 +803,9 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
804 if (p->flags & IP6_TNL_F_CAP_XMIT) { 803 if (p->flags & IP6_TNL_F_CAP_XMIT) {
805 struct net_device *ldev = NULL; 804 struct net_device *ldev = NULL;
806 805
806 rcu_read_lock();
807 if (p->link) 807 if (p->link)
808 ldev = dev_get_by_index(net, p->link); 808 ldev = dev_get_by_index_rcu(net, p->link);
809 809
810 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) 810 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
811 printk(KERN_WARNING 811 printk(KERN_WARNING
@@ -819,8 +819,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
819 p->name); 819 p->name);
820 else 820 else
821 ret = 1; 821 ret = 1;
822 if (ldev) 822 rcu_read_unlock();
823 dev_put(ldev);
824 } 823 }
825 return ret; 824 return ret;
826} 825}
@@ -1410,17 +1409,8 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1410 1409
1411static int ip6_tnl_init_net(struct net *net) 1410static int ip6_tnl_init_net(struct net *net)
1412{ 1411{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1413 int err; 1413 int err;
1414 struct ip6_tnl_net *ip6n;
1415
1416 err = -ENOMEM;
1417 ip6n = kzalloc(sizeof(struct ip6_tnl_net), GFP_KERNEL);
1418 if (ip6n == NULL)
1419 goto err_alloc;
1420
1421 err = net_assign_generic(net, ip6_tnl_net_id, ip6n);
1422 if (err < 0)
1423 goto err_assign;
1424 1414
1425 ip6n->tnls[0] = ip6n->tnls_wc; 1415 ip6n->tnls[0] = ip6n->tnls_wc;
1426 ip6n->tnls[1] = ip6n->tnls_r_l; 1416 ip6n->tnls[1] = ip6n->tnls_r_l;
@@ -1443,27 +1433,23 @@ static int ip6_tnl_init_net(struct net *net)
1443err_register: 1433err_register:
1444 free_netdev(ip6n->fb_tnl_dev); 1434 free_netdev(ip6n->fb_tnl_dev);
1445err_alloc_dev: 1435err_alloc_dev:
1446 /* nothing */
1447err_assign:
1448 kfree(ip6n);
1449err_alloc:
1450 return err; 1436 return err;
1451} 1437}
1452 1438
1453static void ip6_tnl_exit_net(struct net *net) 1439static void ip6_tnl_exit_net(struct net *net)
1454{ 1440{
1455 struct ip6_tnl_net *ip6n; 1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1456 1442
1457 ip6n = net_generic(net, ip6_tnl_net_id);
1458 rtnl_lock(); 1443 rtnl_lock();
1459 ip6_tnl_destroy_tunnels(ip6n); 1444 ip6_tnl_destroy_tunnels(ip6n);
1460 rtnl_unlock(); 1445 rtnl_unlock();
1461 kfree(ip6n);
1462} 1446}
1463 1447
1464static struct pernet_operations ip6_tnl_net_ops = { 1448static struct pernet_operations ip6_tnl_net_ops = {
1465 .init = ip6_tnl_init_net, 1449 .init = ip6_tnl_init_net,
1466 .exit = ip6_tnl_exit_net, 1450 .exit = ip6_tnl_exit_net,
1451 .id = &ip6_tnl_net_id,
1452 .size = sizeof(struct ip6_tnl_net),
1467}; 1453};
1468 1454
1469/** 1455/**
@@ -1488,7 +1474,7 @@ static int __init ip6_tunnel_init(void)
1488 goto unreg_ip4ip6; 1474 goto unreg_ip4ip6;
1489 } 1475 }
1490 1476
1491 err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops); 1477 err = register_pernet_device(&ip6_tnl_net_ops);
1492 if (err < 0) 1478 if (err < 0)
1493 goto err_pernet; 1479 goto err_pernet;
1494 return 0; 1480 return 0;
@@ -1512,7 +1498,7 @@ static void __exit ip6_tunnel_cleanup(void)
1512 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) 1498 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1513 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); 1499 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
1514 1500
1515 unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops); 1501 unregister_pernet_device(&ip6_tnl_net_ops);
1516} 1502}
1517 1503
1518module_init(ip6_tunnel_init); 1504module_init(ip6_tunnel_init);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f9fcf690bd5d..1f9c44442e65 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2375 struct net *net = seq_file_net(seq); 2375 struct net *net = seq_file_net(seq);
2376 2376
2377 state->idev = NULL; 2377 state->idev = NULL;
2378 for_each_netdev(net, state->dev) { 2378 for_each_netdev_rcu(net, state->dev) {
2379 struct inet6_dev *idev; 2379 struct inet6_dev *idev;
2380 idev = in6_dev_get(state->dev); 2380 idev = __in6_dev_get(state->dev);
2381 if (!idev) 2381 if (!idev)
2382 continue; 2382 continue;
2383 read_lock_bh(&idev->lock); 2383 read_lock_bh(&idev->lock);
@@ -2387,7 +2387,6 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2387 break; 2387 break;
2388 } 2388 }
2389 read_unlock_bh(&idev->lock); 2389 read_unlock_bh(&idev->lock);
2390 in6_dev_put(idev);
2391 } 2390 }
2392 return im; 2391 return im;
2393} 2392}
@@ -2398,16 +2397,15 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
2398 2397
2399 im = im->next; 2398 im = im->next;
2400 while (!im) { 2399 while (!im) {
2401 if (likely(state->idev != NULL)) { 2400 if (likely(state->idev != NULL))
2402 read_unlock_bh(&state->idev->lock); 2401 read_unlock_bh(&state->idev->lock);
2403 in6_dev_put(state->idev); 2402
2404 } 2403 state->dev = next_net_device_rcu(state->dev);
2405 state->dev = next_net_device(state->dev);
2406 if (!state->dev) { 2404 if (!state->dev) {
2407 state->idev = NULL; 2405 state->idev = NULL;
2408 break; 2406 break;
2409 } 2407 }
2410 state->idev = in6_dev_get(state->dev); 2408 state->idev = __in6_dev_get(state->dev);
2411 if (!state->idev) 2409 if (!state->idev)
2412 continue; 2410 continue;
2413 read_lock_bh(&state->idev->lock); 2411 read_lock_bh(&state->idev->lock);
@@ -2426,31 +2424,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2426} 2424}
2427 2425
2428static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) 2426static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2429 __acquires(dev_base_lock) 2427 __acquires(RCU)
2430{ 2428{
2431 read_lock(&dev_base_lock); 2429 rcu_read_lock();
2432 return igmp6_mc_get_idx(seq, *pos); 2430 return igmp6_mc_get_idx(seq, *pos);
2433} 2431}
2434 2432
2435static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2433static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2436{ 2434{
2437 struct ifmcaddr6 *im; 2435 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2438 im = igmp6_mc_get_next(seq, v); 2436
2439 ++*pos; 2437 ++*pos;
2440 return im; 2438 return im;
2441} 2439}
2442 2440
2443static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) 2441static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2444 __releases(dev_base_lock) 2442 __releases(RCU)
2445{ 2443{
2446 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2444 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2445
2447 if (likely(state->idev != NULL)) { 2446 if (likely(state->idev != NULL)) {
2448 read_unlock_bh(&state->idev->lock); 2447 read_unlock_bh(&state->idev->lock);
2449 in6_dev_put(state->idev);
2450 state->idev = NULL; 2448 state->idev = NULL;
2451 } 2449 }
2452 state->dev = NULL; 2450 state->dev = NULL;
2453 read_unlock(&dev_base_lock); 2451 rcu_read_unlock();
2454} 2452}
2455 2453
2456static int igmp6_mc_seq_show(struct seq_file *seq, void *v) 2454static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
@@ -2507,9 +2505,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2507 2505
2508 state->idev = NULL; 2506 state->idev = NULL;
2509 state->im = NULL; 2507 state->im = NULL;
2510 for_each_netdev(net, state->dev) { 2508 for_each_netdev_rcu(net, state->dev) {
2511 struct inet6_dev *idev; 2509 struct inet6_dev *idev;
2512 idev = in6_dev_get(state->dev); 2510 idev = __in6_dev_get(state->dev);
2513 if (unlikely(idev == NULL)) 2511 if (unlikely(idev == NULL))
2514 continue; 2512 continue;
2515 read_lock_bh(&idev->lock); 2513 read_lock_bh(&idev->lock);
@@ -2525,7 +2523,6 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2525 spin_unlock_bh(&im->mca_lock); 2523 spin_unlock_bh(&im->mca_lock);
2526 } 2524 }
2527 read_unlock_bh(&idev->lock); 2525 read_unlock_bh(&idev->lock);
2528 in6_dev_put(idev);
2529 } 2526 }
2530 return psf; 2527 return psf;
2531} 2528}
@@ -2539,16 +2536,15 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
2539 spin_unlock_bh(&state->im->mca_lock); 2536 spin_unlock_bh(&state->im->mca_lock);
2540 state->im = state->im->next; 2537 state->im = state->im->next;
2541 while (!state->im) { 2538 while (!state->im) {
2542 if (likely(state->idev != NULL)) { 2539 if (likely(state->idev != NULL))
2543 read_unlock_bh(&state->idev->lock); 2540 read_unlock_bh(&state->idev->lock);
2544 in6_dev_put(state->idev); 2541
2545 } 2542 state->dev = next_net_device_rcu(state->dev);
2546 state->dev = next_net_device(state->dev);
2547 if (!state->dev) { 2543 if (!state->dev) {
2548 state->idev = NULL; 2544 state->idev = NULL;
2549 goto out; 2545 goto out;
2550 } 2546 }
2551 state->idev = in6_dev_get(state->dev); 2547 state->idev = __in6_dev_get(state->dev);
2552 if (!state->idev) 2548 if (!state->idev)
2553 continue; 2549 continue;
2554 read_lock_bh(&state->idev->lock); 2550 read_lock_bh(&state->idev->lock);
@@ -2573,9 +2569,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2573} 2569}
2574 2570
2575static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) 2571static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2576 __acquires(dev_base_lock) 2572 __acquires(RCU)
2577{ 2573{
2578 read_lock(&dev_base_lock); 2574 rcu_read_lock();
2579 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2575 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2580} 2576}
2581 2577
@@ -2591,7 +2587,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2591} 2587}
2592 2588
2593static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) 2589static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2594 __releases(dev_base_lock) 2590 __releases(RCU)
2595{ 2591{
2596 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); 2592 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2597 if (likely(state->im != NULL)) { 2593 if (likely(state->im != NULL)) {
@@ -2600,11 +2596,10 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2600 } 2596 }
2601 if (likely(state->idev != NULL)) { 2597 if (likely(state->idev != NULL)) {
2602 read_unlock_bh(&state->idev->lock); 2598 read_unlock_bh(&state->idev->lock);
2603 in6_dev_put(state->idev);
2604 state->idev = NULL; 2599 state->idev = NULL;
2605 } 2600 }
2606 state->dev = NULL; 2601 state->dev = NULL;
2607 read_unlock(&dev_base_lock); 2602 rcu_read_unlock();
2608} 2603}
2609 2604
2610static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) 2605static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 47a3623e7119..db4d5725cce8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -501,7 +501,7 @@ ipq_rcv_nl_event(struct notifier_block *this,
501 501
502 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) { 502 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
503 write_lock_bh(&queue_lock); 503 write_lock_bh(&queue_lock);
504 if ((n->net == &init_net) && (n->pid == peer_pid)) 504 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
505 __ipq_reset(); 505 __ipq_reset();
506 write_unlock_bh(&queue_lock); 506 write_unlock_bh(&queue_lock);
507 } 507 }
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index cb834ab7f071..926ce8eeffaf 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -249,7 +249,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
249 249
250 /* Raw sockets are IPv6 only */ 250 /* Raw sockets are IPv6 only */
251 if (addr_type == IPV6_ADDR_MAPPED) 251 if (addr_type == IPV6_ADDR_MAPPED)
252 return(-EADDRNOTAVAIL); 252 return -EADDRNOTAVAIL;
253 253
254 lock_sock(sk); 254 lock_sock(sk);
255 255
@@ -257,6 +257,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
257 if (sk->sk_state != TCP_CLOSE) 257 if (sk->sk_state != TCP_CLOSE)
258 goto out; 258 goto out;
259 259
260 rcu_read_lock();
260 /* Check if the address belongs to the host. */ 261 /* Check if the address belongs to the host. */
261 if (addr_type != IPV6_ADDR_ANY) { 262 if (addr_type != IPV6_ADDR_ANY) {
262 struct net_device *dev = NULL; 263 struct net_device *dev = NULL;
@@ -272,13 +273,13 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
272 273
273 /* Binding to link-local address requires an interface */ 274 /* Binding to link-local address requires an interface */
274 if (!sk->sk_bound_dev_if) 275 if (!sk->sk_bound_dev_if)
275 goto out; 276 goto out_unlock;
276 277
277 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); 278 err = -ENODEV;
278 if (!dev) { 279 dev = dev_get_by_index_rcu(sock_net(sk),
279 err = -ENODEV; 280 sk->sk_bound_dev_if);
280 goto out; 281 if (!dev)
281 } 282 goto out_unlock;
282 } 283 }
283 284
284 /* ipv4 addr of the socket is invalid. Only the 285 /* ipv4 addr of the socket is invalid. Only the
@@ -289,13 +290,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
289 err = -EADDRNOTAVAIL; 290 err = -EADDRNOTAVAIL;
290 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, 291 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
291 dev, 0)) { 292 dev, 0)) {
292 if (dev) 293 goto out_unlock;
293 dev_put(dev);
294 goto out;
295 } 294 }
296 } 295 }
297 if (dev)
298 dev_put(dev);
299 } 296 }
300 297
301 inet->inet_rcv_saddr = inet->inet_saddr = v4addr; 298 inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
@@ -303,6 +300,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
303 if (!(addr_type & IPV6_ADDR_MULTICAST)) 300 if (!(addr_type & IPV6_ADDR_MULTICAST))
304 ipv6_addr_copy(&np->saddr, &addr->sin6_addr); 301 ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
305 err = 0; 302 err = 0;
303out_unlock:
304 rcu_read_unlock();
306out: 305out:
307 release_sock(sk); 306 release_sock(sk);
308 return err; 307 return err;
@@ -1336,7 +1335,6 @@ static struct inet_protosw rawv6_protosw = {
1336 .protocol = IPPROTO_IP, /* wild card */ 1335 .protocol = IPPROTO_IP, /* wild card */
1337 .prot = &rawv6_prot, 1336 .prot = &rawv6_prot,
1338 .ops = &inet6_sockraw_ops, 1337 .ops = &inet6_sockraw_ops,
1339 .capability = CAP_NET_RAW,
1340 .no_check = UDP_CSUM_DEFAULT, 1338 .no_check = UDP_CSUM_DEFAULT,
1341 .flags = INET_PROTOSW_REUSE, 1339 .flags = INET_PROTOSW_REUSE,
1342}; 1340};
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index da5bd0ed83df..45efc39753e2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -208,18 +208,17 @@ static void ip6_frag_expire(unsigned long data)
208 fq_kill(fq); 208 fq_kill(fq);
209 209
210 net = container_of(fq->q.net, struct net, ipv6.frags); 210 net = container_of(fq->q.net, struct net, ipv6.frags);
211 dev = dev_get_by_index(net, fq->iif); 211 rcu_read_lock();
212 dev = dev_get_by_index_rcu(net, fq->iif);
212 if (!dev) 213 if (!dev)
213 goto out; 214 goto out_rcu_unlock;
214 215
215 rcu_read_lock();
216 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 216 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
217 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 217 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
218 rcu_read_unlock();
219 218
220 /* Don't send error if the first segment did not arrive. */ 219 /* Don't send error if the first segment did not arrive. */
221 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments) 220 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
222 goto out; 221 goto out_rcu_unlock;
223 222
224 /* 223 /*
225 But use as source device on which LAST ARRIVED 224 But use as source device on which LAST ARRIVED
@@ -228,9 +227,9 @@ static void ip6_frag_expire(unsigned long data)
228 */ 227 */
229 fq->q.fragments->dev = dev; 228 fq->q.fragments->dev = dev;
230 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); 229 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
230out_rcu_unlock:
231 rcu_read_unlock();
231out: 232out:
232 if (dev)
233 dev_put(dev);
234 spin_unlock(&fq->q.lock); 233 spin_unlock(&fq->q.lock);
235 fq_put(fq); 234 fq_put(fq);
236} 235}
@@ -682,7 +681,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
682 struct ctl_table_header *hdr; 681 struct ctl_table_header *hdr;
683 682
684 table = ip6_frags_ns_ctl_table; 683 table = ip6_frags_ns_ctl_table;
685 if (net != &init_net) { 684 if (!net_eq(net, &init_net)) {
686 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); 685 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
687 if (table == NULL) 686 if (table == NULL)
688 goto err_alloc; 687 goto err_alloc;
@@ -700,7 +699,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
700 return 0; 699 return 0;
701 700
702err_reg: 701err_reg:
703 if (net != &init_net) 702 if (!net_eq(net, &init_net))
704 kfree(table); 703 kfree(table);
705err_alloc: 704err_alloc:
706 return -ENOMEM; 705 return -ENOMEM;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 2362a3397e91..976e68244b99 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 66static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 67static void ipip6_tunnel_setup(struct net_device *dev);
68 68
69static int sit_net_id; 69static int sit_net_id __read_mostly;
70struct sit_net { 70struct sit_net {
71 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 71 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
72 struct ip_tunnel *tunnels_r[HASH_SIZE]; 72 struct ip_tunnel *tunnels_r[HASH_SIZE];
@@ -637,6 +637,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
637 struct iphdr *tiph = &tunnel->parms.iph; 637 struct iphdr *tiph = &tunnel->parms.iph;
638 struct ipv6hdr *iph6 = ipv6_hdr(skb); 638 struct ipv6hdr *iph6 = ipv6_hdr(skb);
639 u8 tos = tunnel->parms.iph.tos; 639 u8 tos = tunnel->parms.iph.tos;
640 __be16 df = tiph->frag_off;
640 struct rtable *rt; /* Route to the other host */ 641 struct rtable *rt; /* Route to the other host */
641 struct net_device *tdev; /* Device to other host */ 642 struct net_device *tdev; /* Device to other host */
642 struct iphdr *iph; /* Our new IP header */ 643 struct iphdr *iph; /* Our new IP header */
@@ -726,25 +727,28 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
726 goto tx_error; 727 goto tx_error;
727 } 728 }
728 729
729 if (tiph->frag_off) 730 if (df) {
730 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 731 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
731 else
732 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
733 732
734 if (mtu < 68) { 733 if (mtu < 68) {
735 stats->collisions++; 734 stats->collisions++;
736 ip_rt_put(rt); 735 ip_rt_put(rt);
737 goto tx_error; 736 goto tx_error;
738 } 737 }
739 if (mtu < IPV6_MIN_MTU)
740 mtu = IPV6_MIN_MTU;
741 if (tunnel->parms.iph.daddr && skb_dst(skb))
742 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
743 738
744 if (skb->len > mtu) { 739 if (mtu < IPV6_MIN_MTU) {
745 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 740 mtu = IPV6_MIN_MTU;
746 ip_rt_put(rt); 741 df = 0;
747 goto tx_error; 742 }
743
744 if (tunnel->parms.iph.daddr && skb_dst(skb))
745 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
746
747 if (skb->len > mtu) {
748 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
749 ip_rt_put(rt);
750 goto tx_error;
751 }
748 } 752 }
749 753
750 if (tunnel->err_count > 0) { 754 if (tunnel->err_count > 0) {
@@ -792,11 +796,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
792 iph = ip_hdr(skb); 796 iph = ip_hdr(skb);
793 iph->version = 4; 797 iph->version = 4;
794 iph->ihl = sizeof(struct iphdr)>>2; 798 iph->ihl = sizeof(struct iphdr)>>2;
795 if (mtu > IPV6_MIN_MTU) 799 iph->frag_off = df;
796 iph->frag_off = tiph->frag_off;
797 else
798 iph->frag_off = 0;
799
800 iph->protocol = IPPROTO_IPV6; 800 iph->protocol = IPPROTO_IPV6;
801 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 801 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
802 iph->daddr = rt->rt_dst; 802 iph->daddr = rt->rt_dst;
@@ -1164,17 +1164,8 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1164 1164
1165static int sit_init_net(struct net *net) 1165static int sit_init_net(struct net *net)
1166{ 1166{
1167 struct sit_net *sitn = net_generic(net, sit_net_id);
1167 int err; 1168 int err;
1168 struct sit_net *sitn;
1169
1170 err = -ENOMEM;
1171 sitn = kzalloc(sizeof(struct sit_net), GFP_KERNEL);
1172 if (sitn == NULL)
1173 goto err_alloc;
1174
1175 err = net_assign_generic(net, sit_net_id, sitn);
1176 if (err < 0)
1177 goto err_assign;
1178 1169
1179 sitn->tunnels[0] = sitn->tunnels_wc; 1170 sitn->tunnels[0] = sitn->tunnels_wc;
1180 sitn->tunnels[1] = sitn->tunnels_l; 1171 sitn->tunnels[1] = sitn->tunnels_l;
@@ -1201,37 +1192,33 @@ err_reg_dev:
1201 dev_put(sitn->fb_tunnel_dev); 1192 dev_put(sitn->fb_tunnel_dev);
1202 free_netdev(sitn->fb_tunnel_dev); 1193 free_netdev(sitn->fb_tunnel_dev);
1203err_alloc_dev: 1194err_alloc_dev:
1204 /* nothing */
1205err_assign:
1206 kfree(sitn);
1207err_alloc:
1208 return err; 1195 return err;
1209} 1196}
1210 1197
1211static void sit_exit_net(struct net *net) 1198static void sit_exit_net(struct net *net)
1212{ 1199{
1213 struct sit_net *sitn; 1200 struct sit_net *sitn = net_generic(net, sit_net_id);
1214 LIST_HEAD(list); 1201 LIST_HEAD(list);
1215 1202
1216 sitn = net_generic(net, sit_net_id);
1217 rtnl_lock(); 1203 rtnl_lock();
1218 sit_destroy_tunnels(sitn, &list); 1204 sit_destroy_tunnels(sitn, &list);
1219 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); 1205 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1220 unregister_netdevice_many(&list); 1206 unregister_netdevice_many(&list);
1221 rtnl_unlock(); 1207 rtnl_unlock();
1222 kfree(sitn);
1223} 1208}
1224 1209
1225static struct pernet_operations sit_net_ops = { 1210static struct pernet_operations sit_net_ops = {
1226 .init = sit_init_net, 1211 .init = sit_init_net,
1227 .exit = sit_exit_net, 1212 .exit = sit_exit_net,
1213 .id = &sit_net_id,
1214 .size = sizeof(struct sit_net),
1228}; 1215};
1229 1216
1230static void __exit sit_cleanup(void) 1217static void __exit sit_cleanup(void)
1231{ 1218{
1232 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1219 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1233 1220
1234 unregister_pernet_gen_device(sit_net_id, &sit_net_ops); 1221 unregister_pernet_device(&sit_net_ops);
1235 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1222 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1236} 1223}
1237 1224
@@ -1246,7 +1233,7 @@ static int __init sit_init(void)
1246 return -EAGAIN; 1233 return -EAGAIN;
1247 } 1234 }
1248 1235
1249 err = register_pernet_gen_device(&sit_net_id, &sit_net_ops); 1236 err = register_pernet_device(&sit_net_ops);
1250 if (err < 0) 1237 if (err < 0)
1251 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1238 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1252 1239
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 612fc53e0bb9..5b9af508b8f2 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -159,6 +159,8 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
159 159
160struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) 160struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
161{ 161{
162 struct tcp_options_received tcp_opt;
163 u8 *hash_location;
162 struct inet_request_sock *ireq; 164 struct inet_request_sock *ireq;
163 struct inet6_request_sock *ireq6; 165 struct inet6_request_sock *ireq6;
164 struct tcp_request_sock *treq; 166 struct tcp_request_sock *treq;
@@ -171,7 +173,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
171 int mss; 173 int mss;
172 struct dst_entry *dst; 174 struct dst_entry *dst;
173 __u8 rcv_wscale; 175 __u8 rcv_wscale;
174 struct tcp_options_received tcp_opt;
175 176
176 if (!sysctl_tcp_syncookies || !th->ack) 177 if (!sysctl_tcp_syncookies || !th->ack)
177 goto out; 178 goto out;
@@ -254,7 +255,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
254 255
255 /* check for timestamp cookie support */ 256 /* check for timestamp cookie support */
256 memset(&tcp_opt, 0, sizeof(tcp_opt)); 257 memset(&tcp_opt, 0, sizeof(tcp_opt));
257 tcp_parse_options(skb, &tcp_opt, 0, dst); 258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
258 259
259 if (tcp_opt.saw_tstamp) 260 if (tcp_opt.saw_tstamp)
260 cookie_check_timestamp(&tcp_opt); 261 cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 34925f089e07..aadd7cef73b3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -461,7 +461,8 @@ out:
461} 461}
462 462
463 463
464static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) 464static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
465 struct request_values *rvp)
465{ 466{
466 struct inet6_request_sock *treq = inet6_rsk(req); 467 struct inet6_request_sock *treq = inet6_rsk(req);
467 struct ipv6_pinfo *np = inet6_sk(sk); 468 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -499,7 +500,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
499 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) 500 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
500 goto done; 501 goto done;
501 502
502 skb = tcp_make_synack(sk, dst, req); 503 skb = tcp_make_synack(sk, dst, req, rvp);
503 if (skb) { 504 if (skb) {
504 struct tcphdr *th = tcp_hdr(skb); 505 struct tcphdr *th = tcp_hdr(skb);
505 506
@@ -1161,13 +1162,15 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1161 */ 1162 */
1162static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) 1163static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1163{ 1164{
1165 struct tcp_extend_values tmp_ext;
1166 struct tcp_options_received tmp_opt;
1167 u8 *hash_location;
1168 struct request_sock *req;
1164 struct inet6_request_sock *treq; 1169 struct inet6_request_sock *treq;
1165 struct ipv6_pinfo *np = inet6_sk(sk); 1170 struct ipv6_pinfo *np = inet6_sk(sk);
1166 struct tcp_options_received tmp_opt;
1167 struct tcp_sock *tp = tcp_sk(sk); 1171 struct tcp_sock *tp = tcp_sk(sk);
1168 struct request_sock *req = NULL;
1169 __u32 isn = TCP_SKB_CB(skb)->when;
1170 struct dst_entry *dst = __sk_dst_get(sk); 1172 struct dst_entry *dst = __sk_dst_get(sk);
1173 __u32 isn = TCP_SKB_CB(skb)->when;
1171#ifdef CONFIG_SYN_COOKIES 1174#ifdef CONFIG_SYN_COOKIES
1172 int want_cookie = 0; 1175 int want_cookie = 0;
1173#else 1176#else
@@ -1205,8 +1208,52 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1205 tcp_clear_options(&tmp_opt); 1208 tcp_clear_options(&tmp_opt);
1206 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1207 tmp_opt.user_mss = tp->rx_opt.user_mss; 1210 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
1212
1213 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp &&
1215 !tp->rx_opt.cookie_out_never &&
1216 (sysctl_tcp_cookie_size > 0 ||
1217 (tp->cookie_values != NULL &&
1218 tp->cookie_values->cookie_desired > 0))) {
1219 u8 *c;
1220 u32 *d;
1221 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1222 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1223
1224 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1225 goto drop_and_free;
1226
1227 /* Secret recipe starts with IP addresses */
1228 d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
1229 *mess++ ^= *d++;
1230 *mess++ ^= *d++;
1231 *mess++ ^= *d++;
1232 *mess++ ^= *d++;
1233 d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
1234 *mess++ ^= *d++;
1235 *mess++ ^= *d++;
1236 *mess++ ^= *d++;
1237 *mess++ ^= *d++;
1238
1239 /* plus variable length Initiator Cookie */
1240 c = (u8 *)mess;
1241 while (l-- > 0)
1242 *c++ ^= *hash_location++;
1208 1243
1209 tcp_parse_options(skb, &tmp_opt, 0, dst); 1244#ifdef CONFIG_SYN_COOKIES
1245 want_cookie = 0; /* not our kind of cookie */
1246#endif
1247 tmp_ext.cookie_out_never = 0; /* false */
1248 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1249 } else if (!tp->rx_opt.cookie_in_always) {
1250 /* redundant indications, but ensure initialization. */
1251 tmp_ext.cookie_out_never = 1; /* true */
1252 tmp_ext.cookie_plus = 0;
1253 } else {
1254 goto drop_and_free;
1255 }
1256 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1210 1257
1211 if (want_cookie && !tmp_opt.saw_tstamp) 1258 if (want_cookie && !tmp_opt.saw_tstamp)
1212 tcp_clear_options(&tmp_opt); 1259 tcp_clear_options(&tmp_opt);
@@ -1239,23 +1286,21 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1239 1286
1240 isn = tcp_v6_init_sequence(skb); 1287 isn = tcp_v6_init_sequence(skb);
1241 } 1288 }
1242
1243 tcp_rsk(req)->snt_isn = isn; 1289 tcp_rsk(req)->snt_isn = isn;
1244 1290
1245 security_inet_conn_request(sk, skb, req); 1291 security_inet_conn_request(sk, skb, req);
1246 1292
1247 if (tcp_v6_send_synack(sk, req)) 1293 if (tcp_v6_send_synack(sk, req,
1248 goto drop; 1294 (struct request_values *)&tmp_ext) ||
1295 want_cookie)
1296 goto drop_and_free;
1249 1297
1250 if (!want_cookie) { 1298 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1251 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1299 return 0;
1252 return 0;
1253 }
1254 1300
1301drop_and_free:
1302 reqsk_free(req);
1255drop: 1303drop:
1256 if (req)
1257 reqsk_free(req);
1258
1259 return 0; /* don't send reset */ 1304 return 0; /* don't send reset */
1260} 1305}
1261 1306
@@ -1851,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
1851 */ 1896 */
1852 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 1897 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1853 tp->snd_cwnd_clamp = ~0; 1898 tp->snd_cwnd_clamp = ~0;
1854 tp->mss_cache = 536; 1899 tp->mss_cache = TCP_MSS_DEFAULT;
1855 1900
1856 tp->reordering = sysctl_tcp_reordering; 1901 tp->reordering = sysctl_tcp_reordering;
1857 1902
@@ -1867,6 +1912,19 @@ static int tcp_v6_init_sock(struct sock *sk)
1867 tp->af_specific = &tcp_sock_ipv6_specific; 1912 tp->af_specific = &tcp_sock_ipv6_specific;
1868#endif 1913#endif
1869 1914
1915 /* TCP Cookie Transactions */
1916 if (sysctl_tcp_cookie_size > 0) {
1917 /* Default, cookies without s_data_payload. */
1918 tp->cookie_values =
1919 kzalloc(sizeof(*tp->cookie_values),
1920 sk->sk_allocation);
1921 if (tp->cookie_values != NULL)
1922 kref_init(&tp->cookie_values->kref);
1923 }
1924 /* Presumed zeroed, in order of appearance:
1925 * cookie_in_always, cookie_out_never,
1926 * s_data_constant, s_data_in, s_data_out
1927 */
1870 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1928 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1871 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1929 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1872 1930
@@ -2112,7 +2170,6 @@ static struct inet_protosw tcpv6_protosw = {
2112 .protocol = IPPROTO_TCP, 2170 .protocol = IPPROTO_TCP,
2113 .prot = &tcpv6_prot, 2171 .prot = &tcpv6_prot,
2114 .ops = &inet6_stream_ops, 2172 .ops = &inet6_stream_ops,
2115 .capability = -1,
2116 .no_check = 0, 2173 .no_check = 0,
2117 .flags = INET_PROTOSW_PERMANENT | 2174 .flags = INET_PROTOSW_PERMANENT |
2118 INET_PROTOSW_ICSK, 2175 INET_PROTOSW_ICSK,
@@ -2127,12 +2184,17 @@ static int tcpv6_net_init(struct net *net)
2127static void tcpv6_net_exit(struct net *net) 2184static void tcpv6_net_exit(struct net *net)
2128{ 2185{
2129 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2186 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2130 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6); 2187}
2188
2189static void tcpv6_net_exit_batch(struct list_head *net_exit_list)
2190{
2191 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2131} 2192}
2132 2193
2133static struct pernet_operations tcpv6_net_ops = { 2194static struct pernet_operations tcpv6_net_ops = {
2134 .init = tcpv6_net_init, 2195 .init = tcpv6_net_init,
2135 .exit = tcpv6_net_exit, 2196 .exit = tcpv6_net_exit,
2197 .exit_batch = tcpv6_net_exit_batch,
2136}; 2198};
2137 2199
2138int __init tcpv6_init(void) 2200int __init tcpv6_init(void)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d3b59d73f507..69ebdbe78c47 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -81,9 +81,33 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81 return 0; 81 return 0;
82} 82}
83 83
84static unsigned int udp6_portaddr_hash(struct net *net,
85 const struct in6_addr *addr6,
86 unsigned int port)
87{
88 unsigned int hash, mix = net_hash_mix(net);
89
90 if (ipv6_addr_any(addr6))
91 hash = jhash_1word(0, mix);
92 else if (ipv6_addr_v4mapped(addr6))
93 hash = jhash_1word(addr6->s6_addr32[3], mix);
94 else
95 hash = jhash2(addr6->s6_addr32, 4, mix);
96
97 return hash ^ port;
98}
99
100
84int udp_v6_get_port(struct sock *sk, unsigned short snum) 101int udp_v6_get_port(struct sock *sk, unsigned short snum)
85{ 102{
86 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal); 103 unsigned int hash2_nulladdr =
104 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
105 unsigned int hash2_partial =
106 udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
107
108 /* precompute partial secondary hash */
109 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
110 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
87} 111}
88 112
89static inline int compute_score(struct sock *sk, struct net *net, 113static inline int compute_score(struct sock *sk, struct net *net,
@@ -94,7 +118,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
94{ 118{
95 int score = -1; 119 int score = -1;
96 120
97 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 121 if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
98 sk->sk_family == PF_INET6) { 122 sk->sk_family == PF_INET6) {
99 struct ipv6_pinfo *np = inet6_sk(sk); 123 struct ipv6_pinfo *np = inet6_sk(sk);
100 struct inet_sock *inet = inet_sk(sk); 124 struct inet_sock *inet = inet_sk(sk);
@@ -124,6 +148,86 @@ static inline int compute_score(struct sock *sk, struct net *net,
124 return score; 148 return score;
125} 149}
126 150
151#define SCORE2_MAX (1 + 1 + 1)
152static inline int compute_score2(struct sock *sk, struct net *net,
153 const struct in6_addr *saddr, __be16 sport,
154 const struct in6_addr *daddr, unsigned short hnum,
155 int dif)
156{
157 int score = -1;
158
159 if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
160 sk->sk_family == PF_INET6) {
161 struct ipv6_pinfo *np = inet6_sk(sk);
162 struct inet_sock *inet = inet_sk(sk);
163
164 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
165 return -1;
166 score = 0;
167 if (inet->inet_dport) {
168 if (inet->inet_dport != sport)
169 return -1;
170 score++;
171 }
172 if (!ipv6_addr_any(&np->daddr)) {
173 if (!ipv6_addr_equal(&np->daddr, saddr))
174 return -1;
175 score++;
176 }
177 if (sk->sk_bound_dev_if) {
178 if (sk->sk_bound_dev_if != dif)
179 return -1;
180 score++;
181 }
182 }
183 return score;
184}
185
186
187/* called with read_rcu_lock() */
188static struct sock *udp6_lib_lookup2(struct net *net,
189 const struct in6_addr *saddr, __be16 sport,
190 const struct in6_addr *daddr, unsigned int hnum, int dif,
191 struct udp_hslot *hslot2, unsigned int slot2)
192{
193 struct sock *sk, *result;
194 struct hlist_nulls_node *node;
195 int score, badness;
196
197begin:
198 result = NULL;
199 badness = -1;
200 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
201 score = compute_score2(sk, net, saddr, sport,
202 daddr, hnum, dif);
203 if (score > badness) {
204 result = sk;
205 badness = score;
206 if (score == SCORE2_MAX)
207 goto exact_match;
208 }
209 }
210 /*
211 * if the nulls value we got at the end of this lookup is
212 * not the expected one, we must restart lookup.
213 * We probably met an item that was moved to another chain.
214 */
215 if (get_nulls_value(node) != slot2)
216 goto begin;
217
218 if (result) {
219exact_match:
220 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
221 result = NULL;
222 else if (unlikely(compute_score2(result, net, saddr, sport,
223 daddr, hnum, dif) < badness)) {
224 sock_put(result);
225 goto begin;
226 }
227 }
228 return result;
229}
230
127static struct sock *__udp6_lib_lookup(struct net *net, 231static struct sock *__udp6_lib_lookup(struct net *net,
128 struct in6_addr *saddr, __be16 sport, 232 struct in6_addr *saddr, __be16 sport,
129 struct in6_addr *daddr, __be16 dport, 233 struct in6_addr *daddr, __be16 dport,
@@ -132,11 +236,35 @@ static struct sock *__udp6_lib_lookup(struct net *net,
132 struct sock *sk, *result; 236 struct sock *sk, *result;
133 struct hlist_nulls_node *node; 237 struct hlist_nulls_node *node;
134 unsigned short hnum = ntohs(dport); 238 unsigned short hnum = ntohs(dport);
135 unsigned int hash = udp_hashfn(net, hnum, udptable->mask); 239 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
136 struct udp_hslot *hslot = &udptable->hash[hash]; 240 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
137 int score, badness; 241 int score, badness;
138 242
139 rcu_read_lock(); 243 rcu_read_lock();
244 if (hslot->count > 10) {
245 hash2 = udp6_portaddr_hash(net, daddr, hnum);
246 slot2 = hash2 & udptable->mask;
247 hslot2 = &udptable->hash2[slot2];
248 if (hslot->count < hslot2->count)
249 goto begin;
250
251 result = udp6_lib_lookup2(net, saddr, sport,
252 daddr, hnum, dif,
253 hslot2, slot2);
254 if (!result) {
255 hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
256 slot2 = hash2 & udptable->mask;
257 hslot2 = &udptable->hash2[slot2];
258 if (hslot->count < hslot2->count)
259 goto begin;
260
261 result = udp6_lib_lookup2(net, &in6addr_any, sport,
262 daddr, hnum, dif,
263 hslot2, slot2);
264 }
265 rcu_read_unlock();
266 return result;
267 }
140begin: 268begin:
141 result = NULL; 269 result = NULL;
142 badness = -1; 270 badness = -1;
@@ -152,7 +280,7 @@ begin:
152 * not the expected one, we must restart lookup. 280 * not the expected one, we must restart lookup.
153 * We probably met an item that was moved to another chain. 281 * We probably met an item that was moved to another chain.
154 */ 282 */
155 if (get_nulls_value(node) != hash) 283 if (get_nulls_value(node) != slot)
156 goto begin; 284 goto begin;
157 285
158 if (result) { 286 if (result) {
@@ -288,9 +416,7 @@ try_again:
288 err = ulen; 416 err = ulen;
289 417
290out_free: 418out_free:
291 lock_sock(sk); 419 skb_free_datagram_locked(sk, skb);
292 skb_free_datagram(sk, skb);
293 release_sock(sk);
294out: 420out:
295 return err; 421 return err;
296 422
@@ -417,7 +543,8 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
417 if (!net_eq(sock_net(s), net)) 543 if (!net_eq(sock_net(s), net))
418 continue; 544 continue;
419 545
420 if (s->sk_hash == num && s->sk_family == PF_INET6) { 546 if (udp_sk(s)->udp_port_hash == num &&
547 s->sk_family == PF_INET6) {
421 struct ipv6_pinfo *np = inet6_sk(s); 548 struct ipv6_pinfo *np = inet6_sk(s);
422 if (inet->inet_dport) { 549 if (inet->inet_dport) {
423 if (inet->inet_dport != rmt_port) 550 if (inet->inet_dport != rmt_port)
@@ -442,6 +569,33 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
442 return NULL; 569 return NULL;
443} 570}
444 571
572static void flush_stack(struct sock **stack, unsigned int count,
573 struct sk_buff *skb, unsigned int final)
574{
575 unsigned int i;
576 struct sock *sk;
577 struct sk_buff *skb1;
578
579 for (i = 0; i < count; i++) {
580 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
581
582 sk = stack[i];
583 if (skb1) {
584 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1);
587 else
588 sk_add_backlog(sk, skb1);
589 bh_unlock_sock(sk);
590 } else {
591 atomic_inc(&sk->sk_drops);
592 UDP6_INC_STATS_BH(sock_net(sk),
593 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
594 UDP6_INC_STATS_BH(sock_net(sk),
595 UDP_MIB_INERRORS, IS_UDPLITE(sk));
596 }
597 }
598}
445/* 599/*
446 * Note: called only from the BH handler context, 600 * Note: called only from the BH handler context,
447 * so we don't need to lock the hashes. 601 * so we don't need to lock the hashes.
@@ -450,41 +604,43 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
450 struct in6_addr *saddr, struct in6_addr *daddr, 604 struct in6_addr *saddr, struct in6_addr *daddr,
451 struct udp_table *udptable) 605 struct udp_table *udptable)
452{ 606{
453 struct sock *sk, *sk2; 607 struct sock *sk, *stack[256 / sizeof(struct sock *)];
454 const struct udphdr *uh = udp_hdr(skb); 608 const struct udphdr *uh = udp_hdr(skb);
455 struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); 609 struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
456 int dif; 610 int dif;
611 unsigned int i, count = 0;
457 612
458 spin_lock(&hslot->lock); 613 spin_lock(&hslot->lock);
459 sk = sk_nulls_head(&hslot->head); 614 sk = sk_nulls_head(&hslot->head);
460 dif = inet6_iif(skb); 615 dif = inet6_iif(skb);
461 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); 616 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
462 if (!sk) { 617 while (sk) {
463 kfree_skb(skb); 618 stack[count++] = sk;
464 goto out; 619 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
465 } 620 uh->source, saddr, dif);
466 621 if (unlikely(count == ARRAY_SIZE(stack))) {
467 sk2 = sk; 622 if (!sk)
468 while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr, 623 break;
469 uh->source, saddr, dif))) { 624 flush_stack(stack, count, skb, ~0);
470 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); 625 count = 0;
471 if (buff) {
472 bh_lock_sock(sk2);
473 if (!sock_owned_by_user(sk2))
474 udpv6_queue_rcv_skb(sk2, buff);
475 else
476 sk_add_backlog(sk2, buff);
477 bh_unlock_sock(sk2);
478 } 626 }
479 } 627 }
480 bh_lock_sock(sk); 628 /*
481 if (!sock_owned_by_user(sk)) 629 * before releasing the lock, we must take reference on sockets
482 udpv6_queue_rcv_skb(sk, skb); 630 */
483 else 631 for (i = 0; i < count; i++)
484 sk_add_backlog(sk, skb); 632 sock_hold(stack[i]);
485 bh_unlock_sock(sk); 633
486out:
487 spin_unlock(&hslot->lock); 634 spin_unlock(&hslot->lock);
635
636 if (count) {
637 flush_stack(stack, count, skb, count - 1);
638
639 for (i = 0; i < count; i++)
640 sock_put(stack[i]);
641 } else {
642 kfree_skb(skb);
643 }
488 return 0; 644 return 0;
489} 645}
490 646
@@ -1286,7 +1442,6 @@ static struct inet_protosw udpv6_protosw = {
1286 .protocol = IPPROTO_UDP, 1442 .protocol = IPPROTO_UDP,
1287 .prot = &udpv6_prot, 1443 .prot = &udpv6_prot,
1288 .ops = &inet6_dgram_ops, 1444 .ops = &inet6_dgram_ops,
1289 .capability =-1,
1290 .no_check = UDP_CSUM_DEFAULT, 1445 .no_check = UDP_CSUM_DEFAULT,
1291 .flags = INET_PROTOSW_PERMANENT, 1446 .flags = INET_PROTOSW_PERMANENT,
1292}; 1447};
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index d737a27ee010..6ea6938919e6 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -62,7 +62,6 @@ static struct inet_protosw udplite6_protosw = {
62 .protocol = IPPROTO_UDPLITE, 62 .protocol = IPPROTO_UDPLITE,
63 .prot = &udplitev6_prot, 63 .prot = &udplitev6_prot,
64 .ops = &inet6_dgram_ops, 64 .ops = &inet6_dgram_ops,
65 .capability = -1,
66 .no_check = 0, 65 .no_check = 0,
67 .flags = INET_PROTOSW_PERMANENT, 66 .flags = INET_PROTOSW_PERMANENT,
68}; 67};
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 6481ee4bdf72..f9759b54a6de 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1298,6 +1298,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname,
1298 int opt; 1298 int opt;
1299 int rc = -EINVAL; 1299 int rc = -EINVAL;
1300 1300
1301 lock_kernel();
1301 if (optlen != sizeof(int)) 1302 if (optlen != sizeof(int))
1302 goto out; 1303 goto out;
1303 1304
@@ -1312,6 +1313,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname,
1312 ipx_sk(sk)->type = opt; 1313 ipx_sk(sk)->type = opt;
1313 rc = 0; 1314 rc = 0;
1314out: 1315out:
1316 unlock_kernel();
1315 return rc; 1317 return rc;
1316} 1318}
1317 1319
@@ -1323,6 +1325,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname,
1323 int len; 1325 int len;
1324 int rc = -ENOPROTOOPT; 1326 int rc = -ENOPROTOOPT;
1325 1327
1328 lock_kernel();
1326 if (!(level == SOL_IPX && optname == IPX_TYPE)) 1329 if (!(level == SOL_IPX && optname == IPX_TYPE))
1327 goto out; 1330 goto out;
1328 1331
@@ -1343,6 +1346,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname,
1343 1346
1344 rc = 0; 1347 rc = 0;
1345out: 1348out:
1349 unlock_kernel();
1346 return rc; 1350 return rc;
1347} 1351}
1348 1352
@@ -1352,12 +1356,13 @@ static struct proto ipx_proto = {
1352 .obj_size = sizeof(struct ipx_sock), 1356 .obj_size = sizeof(struct ipx_sock),
1353}; 1357};
1354 1358
1355static int ipx_create(struct net *net, struct socket *sock, int protocol) 1359static int ipx_create(struct net *net, struct socket *sock, int protocol,
1360 int kern)
1356{ 1361{
1357 int rc = -ESOCKTNOSUPPORT; 1362 int rc = -ESOCKTNOSUPPORT;
1358 struct sock *sk; 1363 struct sock *sk;
1359 1364
1360 if (net != &init_net) 1365 if (!net_eq(net, &init_net))
1361 return -EAFNOSUPPORT; 1366 return -EAFNOSUPPORT;
1362 1367
1363 /* 1368 /*
@@ -1390,6 +1395,7 @@ static int ipx_release(struct socket *sock)
1390 if (!sk) 1395 if (!sk)
1391 goto out; 1396 goto out;
1392 1397
1398 lock_kernel();
1393 if (!sock_flag(sk, SOCK_DEAD)) 1399 if (!sock_flag(sk, SOCK_DEAD))
1394 sk->sk_state_change(sk); 1400 sk->sk_state_change(sk);
1395 1401
@@ -1397,6 +1403,7 @@ static int ipx_release(struct socket *sock)
1397 sock->sk = NULL; 1403 sock->sk = NULL;
1398 sk_refcnt_debug_release(sk); 1404 sk_refcnt_debug_release(sk);
1399 ipx_destroy_socket(sk); 1405 ipx_destroy_socket(sk);
1406 unlock_kernel();
1400out: 1407out:
1401 return 0; 1408 return 0;
1402} 1409}
@@ -1424,7 +1431,8 @@ static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc)
1424 return htons(socketNum); 1431 return htons(socketNum);
1425} 1432}
1426 1433
1427static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 1434static int __ipx_bind(struct socket *sock,
1435 struct sockaddr *uaddr, int addr_len)
1428{ 1436{
1429 struct sock *sk = sock->sk; 1437 struct sock *sk = sock->sk;
1430 struct ipx_sock *ipxs = ipx_sk(sk); 1438 struct ipx_sock *ipxs = ipx_sk(sk);
@@ -1519,6 +1527,17 @@ out:
1519 return rc; 1527 return rc;
1520} 1528}
1521 1529
1530static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1531{
1532 int rc;
1533
1534 lock_kernel();
1535 rc = __ipx_bind(sock, uaddr, addr_len);
1536 unlock_kernel();
1537
1538 return rc;
1539}
1540
1522static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, 1541static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
1523 int addr_len, int flags) 1542 int addr_len, int flags)
1524{ 1543{
@@ -1531,6 +1550,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
1531 sk->sk_state = TCP_CLOSE; 1550 sk->sk_state = TCP_CLOSE;
1532 sock->state = SS_UNCONNECTED; 1551 sock->state = SS_UNCONNECTED;
1533 1552
1553 lock_kernel();
1534 if (addr_len != sizeof(*addr)) 1554 if (addr_len != sizeof(*addr))
1535 goto out; 1555 goto out;
1536 addr = (struct sockaddr_ipx *)uaddr; 1556 addr = (struct sockaddr_ipx *)uaddr;
@@ -1550,7 +1570,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
1550 IPX_NODE_LEN); 1570 IPX_NODE_LEN);
1551#endif /* CONFIG_IPX_INTERN */ 1571#endif /* CONFIG_IPX_INTERN */
1552 1572
1553 rc = ipx_bind(sock, (struct sockaddr *)&uaddr, 1573 rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
1554 sizeof(struct sockaddr_ipx)); 1574 sizeof(struct sockaddr_ipx));
1555 if (rc) 1575 if (rc)
1556 goto out; 1576 goto out;
@@ -1577,6 +1597,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
1577 ipxrtr_put(rt); 1597 ipxrtr_put(rt);
1578 rc = 0; 1598 rc = 0;
1579out: 1599out:
1600 unlock_kernel();
1580 return rc; 1601 return rc;
1581} 1602}
1582 1603
@@ -1592,6 +1613,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
1592 1613
1593 *uaddr_len = sizeof(struct sockaddr_ipx); 1614 *uaddr_len = sizeof(struct sockaddr_ipx);
1594 1615
1616 lock_kernel();
1595 if (peer) { 1617 if (peer) {
1596 rc = -ENOTCONN; 1618 rc = -ENOTCONN;
1597 if (sk->sk_state != TCP_ESTABLISHED) 1619 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1626,6 +1648,19 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
1626 1648
1627 rc = 0; 1649 rc = 0;
1628out: 1650out:
1651 unlock_kernel();
1652 return rc;
1653}
1654
1655static unsigned int ipx_datagram_poll(struct file *file, struct socket *sock,
1656 poll_table *wait)
1657{
1658 int rc;
1659
1660 lock_kernel();
1661 rc = datagram_poll(file, sock, wait);
1662 unlock_kernel();
1663
1629 return rc; 1664 return rc;
1630} 1665}
1631 1666
@@ -1700,6 +1735,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
1700 int rc = -EINVAL; 1735 int rc = -EINVAL;
1701 int flags = msg->msg_flags; 1736 int flags = msg->msg_flags;
1702 1737
1738 lock_kernel();
1703 /* Socket gets bound below anyway */ 1739 /* Socket gets bound below anyway */
1704/* if (sk->sk_zapped) 1740/* if (sk->sk_zapped)
1705 return -EIO; */ /* Socket not bound */ 1741 return -EIO; */ /* Socket not bound */
@@ -1723,7 +1759,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
1723 memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, 1759 memcpy(uaddr.sipx_node, ipxs->intrfc->if_node,
1724 IPX_NODE_LEN); 1760 IPX_NODE_LEN);
1725#endif 1761#endif
1726 rc = ipx_bind(sock, (struct sockaddr *)&uaddr, 1762 rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
1727 sizeof(struct sockaddr_ipx)); 1763 sizeof(struct sockaddr_ipx));
1728 if (rc) 1764 if (rc)
1729 goto out; 1765 goto out;
@@ -1751,6 +1787,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
1751 if (rc >= 0) 1787 if (rc >= 0)
1752 rc = len; 1788 rc = len;
1753out: 1789out:
1790 unlock_kernel();
1754 return rc; 1791 return rc;
1755} 1792}
1756 1793
@@ -1765,6 +1802,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1765 struct sk_buff *skb; 1802 struct sk_buff *skb;
1766 int copied, rc; 1803 int copied, rc;
1767 1804
1805 lock_kernel();
1768 /* put the autobinding in */ 1806 /* put the autobinding in */
1769 if (!ipxs->port) { 1807 if (!ipxs->port) {
1770 struct sockaddr_ipx uaddr; 1808 struct sockaddr_ipx uaddr;
@@ -1779,7 +1817,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1779 memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); 1817 memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
1780#endif /* CONFIG_IPX_INTERN */ 1818#endif /* CONFIG_IPX_INTERN */
1781 1819
1782 rc = ipx_bind(sock, (struct sockaddr *)&uaddr, 1820 rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
1783 sizeof(struct sockaddr_ipx)); 1821 sizeof(struct sockaddr_ipx));
1784 if (rc) 1822 if (rc)
1785 goto out; 1823 goto out;
@@ -1823,6 +1861,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1823out_free: 1861out_free:
1824 skb_free_datagram(sk, skb); 1862 skb_free_datagram(sk, skb);
1825out: 1863out:
1864 unlock_kernel();
1826 return rc; 1865 return rc;
1827} 1866}
1828 1867
@@ -1834,6 +1873,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1834 struct sock *sk = sock->sk; 1873 struct sock *sk = sock->sk;
1835 void __user *argp = (void __user *)arg; 1874 void __user *argp = (void __user *)arg;
1836 1875
1876 lock_kernel();
1837 switch (cmd) { 1877 switch (cmd) {
1838 case TIOCOUTQ: 1878 case TIOCOUTQ:
1839 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 1879 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
@@ -1896,6 +1936,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1896 rc = -ENOIOCTLCMD; 1936 rc = -ENOIOCTLCMD;
1897 break; 1937 break;
1898 } 1938 }
1939 unlock_kernel();
1899 1940
1900 return rc; 1941 return rc;
1901} 1942}
@@ -1933,7 +1974,7 @@ static const struct net_proto_family ipx_family_ops = {
1933 .owner = THIS_MODULE, 1974 .owner = THIS_MODULE,
1934}; 1975};
1935 1976
1936static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { 1977static const struct proto_ops ipx_dgram_ops = {
1937 .family = PF_IPX, 1978 .family = PF_IPX,
1938 .owner = THIS_MODULE, 1979 .owner = THIS_MODULE,
1939 .release = ipx_release, 1980 .release = ipx_release,
@@ -1942,7 +1983,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
1942 .socketpair = sock_no_socketpair, 1983 .socketpair = sock_no_socketpair,
1943 .accept = sock_no_accept, 1984 .accept = sock_no_accept,
1944 .getname = ipx_getname, 1985 .getname = ipx_getname,
1945 .poll = datagram_poll, 1986 .poll = ipx_datagram_poll,
1946 .ioctl = ipx_ioctl, 1987 .ioctl = ipx_ioctl,
1947#ifdef CONFIG_COMPAT 1988#ifdef CONFIG_COMPAT
1948 .compat_ioctl = ipx_compat_ioctl, 1989 .compat_ioctl = ipx_compat_ioctl,
@@ -1957,8 +1998,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
1957 .sendpage = sock_no_sendpage, 1998 .sendpage = sock_no_sendpage,
1958}; 1999};
1959 2000
1960SOCKOPS_WRAP(ipx_dgram, PF_IPX);
1961
1962static struct packet_type ipx_8023_packet_type __read_mostly = { 2001static struct packet_type ipx_8023_packet_type __read_mostly = {
1963 .type = cpu_to_be16(ETH_P_802_3), 2002 .type = cpu_to_be16(ETH_P_802_3),
1964 .func = ipx_rcv, 2003 .func = ipx_rcv,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 9429e4002bca..10093aab6173 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -61,7 +61,7 @@
61 61
62#include <net/irda/af_irda.h> 62#include <net/irda/af_irda.h>
63 63
64static int irda_create(struct net *net, struct socket *sock, int protocol); 64static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
65 65
66static const struct proto_ops irda_stream_ops; 66static const struct proto_ops irda_stream_ops;
67static const struct proto_ops irda_seqpacket_ops; 67static const struct proto_ops irda_seqpacket_ops;
@@ -714,11 +714,14 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
714 struct sockaddr_irda saddr; 714 struct sockaddr_irda saddr;
715 struct sock *sk = sock->sk; 715 struct sock *sk = sock->sk;
716 struct irda_sock *self = irda_sk(sk); 716 struct irda_sock *self = irda_sk(sk);
717 int err;
717 718
719 lock_kernel();
718 memset(&saddr, 0, sizeof(saddr)); 720 memset(&saddr, 0, sizeof(saddr));
719 if (peer) { 721 if (peer) {
722 err = -ENOTCONN;
720 if (sk->sk_state != TCP_ESTABLISHED) 723 if (sk->sk_state != TCP_ESTABLISHED)
721 return -ENOTCONN; 724 goto out;
722 725
723 saddr.sir_family = AF_IRDA; 726 saddr.sir_family = AF_IRDA;
724 saddr.sir_lsap_sel = self->dtsap_sel; 727 saddr.sir_lsap_sel = self->dtsap_sel;
@@ -735,8 +738,10 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
735 /* uaddr_len come to us uninitialised */ 738 /* uaddr_len come to us uninitialised */
736 *uaddr_len = sizeof (struct sockaddr_irda); 739 *uaddr_len = sizeof (struct sockaddr_irda);
737 memcpy(uaddr, &saddr, *uaddr_len); 740 memcpy(uaddr, &saddr, *uaddr_len);
738 741 err = 0;
739 return 0; 742out:
743 unlock_kernel();
744 return err;
740} 745}
741 746
742/* 747/*
@@ -748,21 +753,25 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
748static int irda_listen(struct socket *sock, int backlog) 753static int irda_listen(struct socket *sock, int backlog)
749{ 754{
750 struct sock *sk = sock->sk; 755 struct sock *sk = sock->sk;
756 int err = -EOPNOTSUPP;
751 757
752 IRDA_DEBUG(2, "%s()\n", __func__); 758 IRDA_DEBUG(2, "%s()\n", __func__);
753 759
760 lock_kernel();
754 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && 761 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
755 (sk->sk_type != SOCK_DGRAM)) 762 (sk->sk_type != SOCK_DGRAM))
756 return -EOPNOTSUPP; 763 goto out;
757 764
758 if (sk->sk_state != TCP_LISTEN) { 765 if (sk->sk_state != TCP_LISTEN) {
759 sk->sk_max_ack_backlog = backlog; 766 sk->sk_max_ack_backlog = backlog;
760 sk->sk_state = TCP_LISTEN; 767 sk->sk_state = TCP_LISTEN;
761 768
762 return 0; 769 err = 0;
763 } 770 }
771out:
772 unlock_kernel();
764 773
765 return -EOPNOTSUPP; 774 return err;
766} 775}
767 776
768/* 777/*
@@ -783,36 +792,40 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
783 if (addr_len != sizeof(struct sockaddr_irda)) 792 if (addr_len != sizeof(struct sockaddr_irda))
784 return -EINVAL; 793 return -EINVAL;
785 794
795 lock_kernel();
786#ifdef CONFIG_IRDA_ULTRA 796#ifdef CONFIG_IRDA_ULTRA
787 /* Special care for Ultra sockets */ 797 /* Special care for Ultra sockets */
788 if ((sk->sk_type == SOCK_DGRAM) && 798 if ((sk->sk_type == SOCK_DGRAM) &&
789 (sk->sk_protocol == IRDAPROTO_ULTRA)) { 799 (sk->sk_protocol == IRDAPROTO_ULTRA)) {
790 self->pid = addr->sir_lsap_sel; 800 self->pid = addr->sir_lsap_sel;
801 err = -EOPNOTSUPP;
791 if (self->pid & 0x80) { 802 if (self->pid & 0x80) {
792 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); 803 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
793 return -EOPNOTSUPP; 804 goto out;
794 } 805 }
795 err = irda_open_lsap(self, self->pid); 806 err = irda_open_lsap(self, self->pid);
796 if (err < 0) 807 if (err < 0)
797 return err; 808 goto out;
798 809
799 /* Pretend we are connected */ 810 /* Pretend we are connected */
800 sock->state = SS_CONNECTED; 811 sock->state = SS_CONNECTED;
801 sk->sk_state = TCP_ESTABLISHED; 812 sk->sk_state = TCP_ESTABLISHED;
813 err = 0;
802 814
803 return 0; 815 goto out;
804 } 816 }
805#endif /* CONFIG_IRDA_ULTRA */ 817#endif /* CONFIG_IRDA_ULTRA */
806 818
807 self->ias_obj = irias_new_object(addr->sir_name, jiffies); 819 self->ias_obj = irias_new_object(addr->sir_name, jiffies);
820 err = -ENOMEM;
808 if (self->ias_obj == NULL) 821 if (self->ias_obj == NULL)
809 return -ENOMEM; 822 goto out;
810 823
811 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); 824 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
812 if (err < 0) { 825 if (err < 0) {
813 kfree(self->ias_obj->name); 826 kfree(self->ias_obj->name);
814 kfree(self->ias_obj); 827 kfree(self->ias_obj);
815 return err; 828 goto out;
816 } 829 }
817 830
818 /* Register with LM-IAS */ 831 /* Register with LM-IAS */
@@ -820,7 +833,10 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
820 self->stsap_sel, IAS_KERNEL_ATTR); 833 self->stsap_sel, IAS_KERNEL_ATTR);
821 irias_insert_object(self->ias_obj); 834 irias_insert_object(self->ias_obj);
822 835
823 return 0; 836 err = 0;
837out:
838 unlock_kernel();
839 return err;
824} 840}
825 841
826/* 842/*
@@ -839,22 +855,26 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
839 855
840 IRDA_DEBUG(2, "%s()\n", __func__); 856 IRDA_DEBUG(2, "%s()\n", __func__);
841 857
842 err = irda_create(sock_net(sk), newsock, sk->sk_protocol); 858 lock_kernel();
859 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
843 if (err) 860 if (err)
844 return err; 861 goto out;
845 862
863 err = -EINVAL;
846 if (sock->state != SS_UNCONNECTED) 864 if (sock->state != SS_UNCONNECTED)
847 return -EINVAL; 865 goto out;
848 866
849 if ((sk = sock->sk) == NULL) 867 if ((sk = sock->sk) == NULL)
850 return -EINVAL; 868 goto out;
851 869
870 err = -EOPNOTSUPP;
852 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && 871 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
853 (sk->sk_type != SOCK_DGRAM)) 872 (sk->sk_type != SOCK_DGRAM))
854 return -EOPNOTSUPP; 873 goto out;
855 874
875 err = -EINVAL;
856 if (sk->sk_state != TCP_LISTEN) 876 if (sk->sk_state != TCP_LISTEN)
857 return -EINVAL; 877 goto out;
858 878
859 /* 879 /*
860 * The read queue this time is holding sockets ready to use 880 * The read queue this time is holding sockets ready to use
@@ -875,18 +895,20 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
875 break; 895 break;
876 896
877 /* Non blocking operation */ 897 /* Non blocking operation */
898 err = -EWOULDBLOCK;
878 if (flags & O_NONBLOCK) 899 if (flags & O_NONBLOCK)
879 return -EWOULDBLOCK; 900 goto out;
880 901
881 err = wait_event_interruptible(*(sk->sk_sleep), 902 err = wait_event_interruptible(*(sk->sk_sleep),
882 skb_peek(&sk->sk_receive_queue)); 903 skb_peek(&sk->sk_receive_queue));
883 if (err) 904 if (err)
884 return err; 905 goto out;
885 } 906 }
886 907
887 newsk = newsock->sk; 908 newsk = newsock->sk;
909 err = -EIO;
888 if (newsk == NULL) 910 if (newsk == NULL)
889 return -EIO; 911 goto out;
890 912
891 newsk->sk_state = TCP_ESTABLISHED; 913 newsk->sk_state = TCP_ESTABLISHED;
892 914
@@ -894,10 +916,11 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
894 916
895 /* Now attach up the new socket */ 917 /* Now attach up the new socket */
896 new->tsap = irttp_dup(self->tsap, new); 918 new->tsap = irttp_dup(self->tsap, new);
919 err = -EPERM; /* value does not seem to make sense. -arnd */
897 if (!new->tsap) { 920 if (!new->tsap) {
898 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); 921 IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
899 kfree_skb(skb); 922 kfree_skb(skb);
900 return -1; 923 goto out;
901 } 924 }
902 925
903 new->stsap_sel = new->tsap->stsap_sel; 926 new->stsap_sel = new->tsap->stsap_sel;
@@ -921,8 +944,10 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
921 newsock->state = SS_CONNECTED; 944 newsock->state = SS_CONNECTED;
922 945
923 irda_connect_response(new); 946 irda_connect_response(new);
924 947 err = 0;
925 return 0; 948out:
949 unlock_kernel();
950 return err;
926} 951}
927 952
928/* 953/*
@@ -955,28 +980,34 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
955 980
956 IRDA_DEBUG(2, "%s(%p)\n", __func__, self); 981 IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
957 982
983 lock_kernel();
958 /* Don't allow connect for Ultra sockets */ 984 /* Don't allow connect for Ultra sockets */
985 err = -ESOCKTNOSUPPORT;
959 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) 986 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
960 return -ESOCKTNOSUPPORT; 987 goto out;
961 988
962 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { 989 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
963 sock->state = SS_CONNECTED; 990 sock->state = SS_CONNECTED;
964 return 0; /* Connect completed during a ERESTARTSYS event */ 991 err = 0;
992 goto out; /* Connect completed during a ERESTARTSYS event */
965 } 993 }
966 994
967 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { 995 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
968 sock->state = SS_UNCONNECTED; 996 sock->state = SS_UNCONNECTED;
969 return -ECONNREFUSED; 997 err = -ECONNREFUSED;
998 goto out;
970 } 999 }
971 1000
1001 err = -EISCONN; /* No reconnect on a seqpacket socket */
972 if (sk->sk_state == TCP_ESTABLISHED) 1002 if (sk->sk_state == TCP_ESTABLISHED)
973 return -EISCONN; /* No reconnect on a seqpacket socket */ 1003 goto out;
974 1004
975 sk->sk_state = TCP_CLOSE; 1005 sk->sk_state = TCP_CLOSE;
976 sock->state = SS_UNCONNECTED; 1006 sock->state = SS_UNCONNECTED;
977 1007
1008 err = -EINVAL;
978 if (addr_len != sizeof(struct sockaddr_irda)) 1009 if (addr_len != sizeof(struct sockaddr_irda))
979 return -EINVAL; 1010 goto out;
980 1011
981 /* Check if user supplied any destination device address */ 1012 /* Check if user supplied any destination device address */
982 if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { 1013 if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) {
@@ -984,7 +1015,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
984 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); 1015 err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
985 if (err) { 1016 if (err) {
986 IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); 1017 IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__);
987 return err; 1018 goto out;
988 } 1019 }
989 } else { 1020 } else {
990 /* Use the one provided by the user */ 1021 /* Use the one provided by the user */
@@ -1000,7 +1031,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1000 err = irda_find_lsap_sel(self, addr->sir_name); 1031 err = irda_find_lsap_sel(self, addr->sir_name);
1001 if (err) { 1032 if (err) {
1002 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); 1033 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1003 return err; 1034 goto out;
1004 } 1035 }
1005 } else { 1036 } else {
1006 /* Directly connect to the remote LSAP 1037 /* Directly connect to the remote LSAP
@@ -1025,29 +1056,35 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1025 self->max_sdu_size_rx, NULL); 1056 self->max_sdu_size_rx, NULL);
1026 if (err) { 1057 if (err) {
1027 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); 1058 IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
1028 return err; 1059 goto out;
1029 } 1060 }
1030 1061
1031 /* Now the loop */ 1062 /* Now the loop */
1063 err = -EINPROGRESS;
1032 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) 1064 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
1033 return -EINPROGRESS; 1065 goto out;
1034 1066
1067 err = -ERESTARTSYS;
1035 if (wait_event_interruptible(*(sk->sk_sleep), 1068 if (wait_event_interruptible(*(sk->sk_sleep),
1036 (sk->sk_state != TCP_SYN_SENT))) 1069 (sk->sk_state != TCP_SYN_SENT)))
1037 return -ERESTARTSYS; 1070 goto out;
1038 1071
1039 if (sk->sk_state != TCP_ESTABLISHED) { 1072 if (sk->sk_state != TCP_ESTABLISHED) {
1040 sock->state = SS_UNCONNECTED; 1073 sock->state = SS_UNCONNECTED;
1041 err = sock_error(sk); 1074 err = sock_error(sk);
1042 return err? err : -ECONNRESET; 1075 if (!err)
1076 err = -ECONNRESET;
1077 goto out;
1043 } 1078 }
1044 1079
1045 sock->state = SS_CONNECTED; 1080 sock->state = SS_CONNECTED;
1046 1081
1047 /* At this point, IrLMP has assigned our source address */ 1082 /* At this point, IrLMP has assigned our source address */
1048 self->saddr = irttp_get_saddr(self->tsap); 1083 self->saddr = irttp_get_saddr(self->tsap);
1049 1084 err = 0;
1050 return 0; 1085out:
1086 unlock_kernel();
1087 return err;
1051} 1088}
1052 1089
1053static struct proto irda_proto = { 1090static struct proto irda_proto = {
@@ -1062,7 +1099,8 @@ static struct proto irda_proto = {
1062 * Create IrDA socket 1099 * Create IrDA socket
1063 * 1100 *
1064 */ 1101 */
1065static int irda_create(struct net *net, struct socket *sock, int protocol) 1102static int irda_create(struct net *net, struct socket *sock, int protocol,
1103 int kern)
1066{ 1104{
1067 struct sock *sk; 1105 struct sock *sk;
1068 struct irda_sock *self; 1106 struct irda_sock *self;
@@ -1192,6 +1230,7 @@ static int irda_release(struct socket *sock)
1192 if (sk == NULL) 1230 if (sk == NULL)
1193 return 0; 1231 return 0;
1194 1232
1233 lock_kernel();
1195 lock_sock(sk); 1234 lock_sock(sk);
1196 sk->sk_state = TCP_CLOSE; 1235 sk->sk_state = TCP_CLOSE;
1197 sk->sk_shutdown |= SEND_SHUTDOWN; 1236 sk->sk_shutdown |= SEND_SHUTDOWN;
@@ -1210,6 +1249,7 @@ static int irda_release(struct socket *sock)
1210 /* Destroy networking socket if we are the last reference on it, 1249 /* Destroy networking socket if we are the last reference on it,
1211 * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ 1250 * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
1212 sock_put(sk); 1251 sock_put(sk);
1252 unlock_kernel();
1213 1253
1214 /* Notes on socket locking and deallocation... - Jean II 1254 /* Notes on socket locking and deallocation... - Jean II
1215 * In theory we should put pairs of sock_hold() / sock_put() to 1255 * In theory we should put pairs of sock_hold() / sock_put() to
@@ -1257,28 +1297,37 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1257 1297
1258 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); 1298 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1259 1299
1300 lock_kernel();
1260 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ 1301 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
1261 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | 1302 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
1262 MSG_NOSIGNAL)) 1303 MSG_NOSIGNAL)) {
1263 return -EINVAL; 1304 err = -EINVAL;
1305 goto out;
1306 }
1264 1307
1265 if (sk->sk_shutdown & SEND_SHUTDOWN) 1308 if (sk->sk_shutdown & SEND_SHUTDOWN)
1266 goto out_err; 1309 goto out_err;
1267 1310
1268 if (sk->sk_state != TCP_ESTABLISHED) 1311 if (sk->sk_state != TCP_ESTABLISHED) {
1269 return -ENOTCONN; 1312 err = -ENOTCONN;
1313 goto out;
1314 }
1270 1315
1271 self = irda_sk(sk); 1316 self = irda_sk(sk);
1272 1317
1273 /* Check if IrTTP is wants us to slow down */ 1318 /* Check if IrTTP is wants us to slow down */
1274 1319
1275 if (wait_event_interruptible(*(sk->sk_sleep), 1320 if (wait_event_interruptible(*(sk->sk_sleep),
1276 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) 1321 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
1277 return -ERESTARTSYS; 1322 err = -ERESTARTSYS;
1323 goto out;
1324 }
1278 1325
1279 /* Check if we are still connected */ 1326 /* Check if we are still connected */
1280 if (sk->sk_state != TCP_ESTABLISHED) 1327 if (sk->sk_state != TCP_ESTABLISHED) {
1281 return -ENOTCONN; 1328 err = -ENOTCONN;
1329 goto out;
1330 }
1282 1331
1283 /* Check that we don't send out too big frames */ 1332 /* Check that we don't send out too big frames */
1284 if (len > self->max_data_size) { 1333 if (len > self->max_data_size) {
@@ -1310,11 +1359,16 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1310 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); 1359 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1311 goto out_err; 1360 goto out_err;
1312 } 1361 }
1362
1363 unlock_kernel();
1313 /* Tell client how much data we actually sent */ 1364 /* Tell client how much data we actually sent */
1314 return len; 1365 return len;
1315 1366
1316 out_err: 1367out_err:
1317 return sk_stream_error(sk, msg->msg_flags, err); 1368 err = sk_stream_error(sk, msg->msg_flags, err);
1369out:
1370 unlock_kernel();
1371 return err;
1318 1372
1319} 1373}
1320 1374
@@ -1335,13 +1389,14 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1335 1389
1336 IRDA_DEBUG(4, "%s()\n", __func__); 1390 IRDA_DEBUG(4, "%s()\n", __func__);
1337 1391
1392 lock_kernel();
1338 if ((err = sock_error(sk)) < 0) 1393 if ((err = sock_error(sk)) < 0)
1339 return err; 1394 goto out;
1340 1395
1341 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1396 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1342 flags & MSG_DONTWAIT, &err); 1397 flags & MSG_DONTWAIT, &err);
1343 if (!skb) 1398 if (!skb)
1344 return err; 1399 goto out;
1345 1400
1346 skb_reset_transport_header(skb); 1401 skb_reset_transport_header(skb);
1347 copied = skb->len; 1402 copied = skb->len;
@@ -1369,8 +1424,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1369 irttp_flow_request(self->tsap, FLOW_START); 1424 irttp_flow_request(self->tsap, FLOW_START);
1370 } 1425 }
1371 } 1426 }
1372 1427 unlock_kernel();
1373 return copied; 1428 return copied;
1429
1430out:
1431 unlock_kernel();
1432 return err;
1374} 1433}
1375 1434
1376/* 1435/*
@@ -1388,15 +1447,19 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1388 1447
1389 IRDA_DEBUG(3, "%s()\n", __func__); 1448 IRDA_DEBUG(3, "%s()\n", __func__);
1390 1449
1450 lock_kernel();
1391 if ((err = sock_error(sk)) < 0) 1451 if ((err = sock_error(sk)) < 0)
1392 return err; 1452 goto out;
1393 1453
1454 err = -EINVAL;
1394 if (sock->flags & __SO_ACCEPTCON) 1455 if (sock->flags & __SO_ACCEPTCON)
1395 return(-EINVAL); 1456 goto out;
1396 1457
1458 err =-EOPNOTSUPP;
1397 if (flags & MSG_OOB) 1459 if (flags & MSG_OOB)
1398 return -EOPNOTSUPP; 1460 goto out;
1399 1461
1462 err = 0;
1400 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 1463 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
1401 timeo = sock_rcvtimeo(sk, noblock); 1464 timeo = sock_rcvtimeo(sk, noblock);
1402 1465
@@ -1408,7 +1471,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1408 1471
1409 if (skb == NULL) { 1472 if (skb == NULL) {
1410 DEFINE_WAIT(wait); 1473 DEFINE_WAIT(wait);
1411 int ret = 0; 1474 err = 0;
1412 1475
1413 if (copied >= target) 1476 if (copied >= target)
1414 break; 1477 break;
@@ -1418,25 +1481,25 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1418 /* 1481 /*
1419 * POSIX 1003.1g mandates this order. 1482 * POSIX 1003.1g mandates this order.
1420 */ 1483 */
1421 ret = sock_error(sk); 1484 err = sock_error(sk);
1422 if (ret) 1485 if (err)
1423 ; 1486 ;
1424 else if (sk->sk_shutdown & RCV_SHUTDOWN) 1487 else if (sk->sk_shutdown & RCV_SHUTDOWN)
1425 ; 1488 ;
1426 else if (noblock) 1489 else if (noblock)
1427 ret = -EAGAIN; 1490 err = -EAGAIN;
1428 else if (signal_pending(current)) 1491 else if (signal_pending(current))
1429 ret = sock_intr_errno(timeo); 1492 err = sock_intr_errno(timeo);
1430 else if (sk->sk_state != TCP_ESTABLISHED) 1493 else if (sk->sk_state != TCP_ESTABLISHED)
1431 ret = -ENOTCONN; 1494 err = -ENOTCONN;
1432 else if (skb_peek(&sk->sk_receive_queue) == NULL) 1495 else if (skb_peek(&sk->sk_receive_queue) == NULL)
1433 /* Wait process until data arrives */ 1496 /* Wait process until data arrives */
1434 schedule(); 1497 schedule();
1435 1498
1436 finish_wait(sk->sk_sleep, &wait); 1499 finish_wait(sk->sk_sleep, &wait);
1437 1500
1438 if (ret) 1501 if (err)
1439 return ret; 1502 goto out;
1440 if (sk->sk_shutdown & RCV_SHUTDOWN) 1503 if (sk->sk_shutdown & RCV_SHUTDOWN)
1441 break; 1504 break;
1442 1505
@@ -1489,7 +1552,9 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1489 } 1552 }
1490 } 1553 }
1491 1554
1492 return copied; 1555out:
1556 unlock_kernel();
1557 return err ? : copied;
1493} 1558}
1494 1559
1495/* 1560/*
@@ -1507,18 +1572,23 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1507 struct sk_buff *skb; 1572 struct sk_buff *skb;
1508 int err; 1573 int err;
1509 1574
1575 lock_kernel();
1576
1510 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); 1577 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1511 1578
1579 err = -EINVAL;
1512 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) 1580 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1513 return -EINVAL; 1581 goto out;
1514 1582
1515 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1583 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1516 send_sig(SIGPIPE, current, 0); 1584 send_sig(SIGPIPE, current, 0);
1517 return -EPIPE; 1585 err = -EPIPE;
1586 goto out;
1518 } 1587 }
1519 1588
1589 err = -ENOTCONN;
1520 if (sk->sk_state != TCP_ESTABLISHED) 1590 if (sk->sk_state != TCP_ESTABLISHED)
1521 return -ENOTCONN; 1591 goto out;
1522 1592
1523 self = irda_sk(sk); 1593 self = irda_sk(sk);
1524 1594
@@ -1535,8 +1605,9 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1535 1605
1536 skb = sock_alloc_send_skb(sk, len + self->max_header_size, 1606 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1537 msg->msg_flags & MSG_DONTWAIT, &err); 1607 msg->msg_flags & MSG_DONTWAIT, &err);
1608 err = -ENOBUFS;
1538 if (!skb) 1609 if (!skb)
1539 return -ENOBUFS; 1610 goto out;
1540 1611
1541 skb_reserve(skb, self->max_header_size); 1612 skb_reserve(skb, self->max_header_size);
1542 skb_reset_transport_header(skb); 1613 skb_reset_transport_header(skb);
@@ -1546,7 +1617,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1546 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); 1617 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1547 if (err) { 1618 if (err) {
1548 kfree_skb(skb); 1619 kfree_skb(skb);
1549 return err; 1620 goto out;
1550 } 1621 }
1551 1622
1552 /* 1623 /*
@@ -1556,9 +1627,13 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1556 err = irttp_udata_request(self->tsap, skb); 1627 err = irttp_udata_request(self->tsap, skb);
1557 if (err) { 1628 if (err) {
1558 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); 1629 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1559 return err; 1630 goto out;
1560 } 1631 }
1632 unlock_kernel();
1561 return len; 1633 return len;
1634out:
1635 unlock_kernel();
1636 return err;
1562} 1637}
1563 1638
1564/* 1639/*
@@ -1580,12 +1655,15 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1580 1655
1581 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); 1656 IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
1582 1657
1658 lock_kernel();
1659 err = -EINVAL;
1583 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) 1660 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
1584 return -EINVAL; 1661 goto out;
1585 1662
1663 err = -EPIPE;
1586 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1664 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1587 send_sig(SIGPIPE, current, 0); 1665 send_sig(SIGPIPE, current, 0);
1588 return -EPIPE; 1666 goto out;
1589 } 1667 }
1590 1668
1591 self = irda_sk(sk); 1669 self = irda_sk(sk);
@@ -1593,16 +1671,18 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1593 /* Check if an address was specified with sendto. Jean II */ 1671 /* Check if an address was specified with sendto. Jean II */
1594 if (msg->msg_name) { 1672 if (msg->msg_name) {
1595 struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; 1673 struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name;
1674 err = -EINVAL;
1596 /* Check address, extract pid. Jean II */ 1675 /* Check address, extract pid. Jean II */
1597 if (msg->msg_namelen < sizeof(*addr)) 1676 if (msg->msg_namelen < sizeof(*addr))
1598 return -EINVAL; 1677 goto out;
1599 if (addr->sir_family != AF_IRDA) 1678 if (addr->sir_family != AF_IRDA)
1600 return -EINVAL; 1679 goto out;
1601 1680
1602 pid = addr->sir_lsap_sel; 1681 pid = addr->sir_lsap_sel;
1603 if (pid & 0x80) { 1682 if (pid & 0x80) {
1604 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); 1683 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
1605 return -EOPNOTSUPP; 1684 err = -EOPNOTSUPP;
1685 goto out;
1606 } 1686 }
1607 } else { 1687 } else {
1608 /* Check that the socket is properly bound to an Ultra 1688 /* Check that the socket is properly bound to an Ultra
@@ -1611,7 +1691,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1611 (sk->sk_state != TCP_ESTABLISHED)) { 1691 (sk->sk_state != TCP_ESTABLISHED)) {
1612 IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", 1692 IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n",
1613 __func__); 1693 __func__);
1614 return -ENOTCONN; 1694 err = -ENOTCONN;
1695 goto out;
1615 } 1696 }
1616 /* Use PID from socket */ 1697 /* Use PID from socket */
1617 bound = 1; 1698 bound = 1;
@@ -1630,8 +1711,9 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1630 1711
1631 skb = sock_alloc_send_skb(sk, len + self->max_header_size, 1712 skb = sock_alloc_send_skb(sk, len + self->max_header_size,
1632 msg->msg_flags & MSG_DONTWAIT, &err); 1713 msg->msg_flags & MSG_DONTWAIT, &err);
1714 err = -ENOBUFS;
1633 if (!skb) 1715 if (!skb)
1634 return -ENOBUFS; 1716 goto out;
1635 1717
1636 skb_reserve(skb, self->max_header_size); 1718 skb_reserve(skb, self->max_header_size);
1637 skb_reset_transport_header(skb); 1719 skb_reset_transport_header(skb);
@@ -1641,16 +1723,16 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1641 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); 1723 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1642 if (err) { 1724 if (err) {
1643 kfree_skb(skb); 1725 kfree_skb(skb);
1644 return err; 1726 goto out;
1645 } 1727 }
1646 1728
1647 err = irlmp_connless_data_request((bound ? self->lsap : NULL), 1729 err = irlmp_connless_data_request((bound ? self->lsap : NULL),
1648 skb, pid); 1730 skb, pid);
1649 if (err) { 1731 if (err)
1650 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); 1732 IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
1651 return err; 1733out:
1652 } 1734 unlock_kernel();
1653 return len; 1735 return err ? : len;
1654} 1736}
1655#endif /* CONFIG_IRDA_ULTRA */ 1737#endif /* CONFIG_IRDA_ULTRA */
1656 1738
@@ -1664,6 +1746,8 @@ static int irda_shutdown(struct socket *sock, int how)
1664 1746
1665 IRDA_DEBUG(1, "%s(%p)\n", __func__, self); 1747 IRDA_DEBUG(1, "%s(%p)\n", __func__, self);
1666 1748
1749 lock_kernel();
1750
1667 sk->sk_state = TCP_CLOSE; 1751 sk->sk_state = TCP_CLOSE;
1668 sk->sk_shutdown |= SEND_SHUTDOWN; 1752 sk->sk_shutdown |= SEND_SHUTDOWN;
1669 sk->sk_state_change(sk); 1753 sk->sk_state_change(sk);
@@ -1684,6 +1768,8 @@ static int irda_shutdown(struct socket *sock, int how)
1684 self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ 1768 self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */
1685 self->saddr = 0x0; /* so IrLMP assign us any link */ 1769 self->saddr = 0x0; /* so IrLMP assign us any link */
1686 1770
1771 unlock_kernel();
1772
1687 return 0; 1773 return 0;
1688} 1774}
1689 1775
@@ -1699,6 +1785,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1699 1785
1700 IRDA_DEBUG(4, "%s()\n", __func__); 1786 IRDA_DEBUG(4, "%s()\n", __func__);
1701 1787
1788 lock_kernel();
1702 poll_wait(file, sk->sk_sleep, wait); 1789 poll_wait(file, sk->sk_sleep, wait);
1703 mask = 0; 1790 mask = 0;
1704 1791
@@ -1746,18 +1833,34 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1746 default: 1833 default:
1747 break; 1834 break;
1748 } 1835 }
1836 unlock_kernel();
1749 return mask; 1837 return mask;
1750} 1838}
1751 1839
1840static unsigned int irda_datagram_poll(struct file *file, struct socket *sock,
1841 poll_table *wait)
1842{
1843 int err;
1844
1845 lock_kernel();
1846 err = datagram_poll(file, sock, wait);
1847 unlock_kernel();
1848
1849 return err;
1850}
1851
1752/* 1852/*
1753 * Function irda_ioctl (sock, cmd, arg) 1853 * Function irda_ioctl (sock, cmd, arg)
1754 */ 1854 */
1755static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1855static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1756{ 1856{
1757 struct sock *sk = sock->sk; 1857 struct sock *sk = sock->sk;
1858 int err;
1758 1859
1759 IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); 1860 IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd);
1760 1861
1862 lock_kernel();
1863 err = -EINVAL;
1761 switch (cmd) { 1864 switch (cmd) {
1762 case TIOCOUTQ: { 1865 case TIOCOUTQ: {
1763 long amount; 1866 long amount;
@@ -1765,9 +1868,8 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1765 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 1868 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1766 if (amount < 0) 1869 if (amount < 0)
1767 amount = 0; 1870 amount = 0;
1768 if (put_user(amount, (unsigned int __user *)arg)) 1871 err = put_user(amount, (unsigned int __user *)arg);
1769 return -EFAULT; 1872 break;
1770 return 0;
1771 } 1873 }
1772 1874
1773 case TIOCINQ: { 1875 case TIOCINQ: {
@@ -1776,15 +1878,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1776 /* These two are safe on a single CPU system as only user tasks fiddle here */ 1878 /* These two are safe on a single CPU system as only user tasks fiddle here */
1777 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) 1879 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1778 amount = skb->len; 1880 amount = skb->len;
1779 if (put_user(amount, (unsigned int __user *)arg)) 1881 err = put_user(amount, (unsigned int __user *)arg);
1780 return -EFAULT; 1882 break;
1781 return 0;
1782 } 1883 }
1783 1884
1784 case SIOCGSTAMP: 1885 case SIOCGSTAMP:
1785 if (sk != NULL) 1886 if (sk != NULL)
1786 return sock_get_timestamp(sk, (struct timeval __user *)arg); 1887 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
1787 return -EINVAL; 1888 break;
1788 1889
1789 case SIOCGIFADDR: 1890 case SIOCGIFADDR:
1790 case SIOCSIFADDR: 1891 case SIOCSIFADDR:
@@ -1796,14 +1897,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1796 case SIOCSIFNETMASK: 1897 case SIOCSIFNETMASK:
1797 case SIOCGIFMETRIC: 1898 case SIOCGIFMETRIC:
1798 case SIOCSIFMETRIC: 1899 case SIOCSIFMETRIC:
1799 return -EINVAL; 1900 break;
1800 default: 1901 default:
1801 IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); 1902 IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__);
1802 return -ENOIOCTLCMD; 1903 err = -ENOIOCTLCMD;
1803 } 1904 }
1905 unlock_kernel();
1804 1906
1805 /*NOTREACHED*/ 1907 return err;
1806 return 0;
1807} 1908}
1808 1909
1809#ifdef CONFIG_COMPAT 1910#ifdef CONFIG_COMPAT
@@ -1825,7 +1926,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
1825 * Set some options for the socket 1926 * Set some options for the socket
1826 * 1927 *
1827 */ 1928 */
1828static int irda_setsockopt(struct socket *sock, int level, int optname, 1929static int __irda_setsockopt(struct socket *sock, int level, int optname,
1829 char __user *optval, unsigned int optlen) 1930 char __user *optval, unsigned int optlen)
1830{ 1931{
1831 struct sock *sk = sock->sk; 1932 struct sock *sk = sock->sk;
@@ -2083,6 +2184,18 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
2083 return 0; 2184 return 0;
2084} 2185}
2085 2186
2187static int irda_setsockopt(struct socket *sock, int level, int optname,
2188 char __user *optval, unsigned int optlen)
2189{
2190 int err;
2191
2192 lock_kernel();
2193 err = __irda_setsockopt(sock, level, optname, optval, optlen);
2194 unlock_kernel();
2195
2196 return err;
2197}
2198
2086/* 2199/*
2087 * Function irda_extract_ias_value(ias_opt, ias_value) 2200 * Function irda_extract_ias_value(ias_opt, ias_value)
2088 * 2201 *
@@ -2135,7 +2248,7 @@ static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
2135/* 2248/*
2136 * Function irda_getsockopt (sock, level, optname, optval, optlen) 2249 * Function irda_getsockopt (sock, level, optname, optval, optlen)
2137 */ 2250 */
2138static int irda_getsockopt(struct socket *sock, int level, int optname, 2251static int __irda_getsockopt(struct socket *sock, int level, int optname,
2139 char __user *optval, int __user *optlen) 2252 char __user *optval, int __user *optlen)
2140{ 2253{
2141 struct sock *sk = sock->sk; 2254 struct sock *sk = sock->sk;
@@ -2463,13 +2576,25 @@ bed:
2463 return 0; 2576 return 0;
2464} 2577}
2465 2578
2579static int irda_getsockopt(struct socket *sock, int level, int optname,
2580 char __user *optval, int __user *optlen)
2581{
2582 int err;
2583
2584 lock_kernel();
2585 err = __irda_getsockopt(sock, level, optname, optval, optlen);
2586 unlock_kernel();
2587
2588 return err;
2589}
2590
2466static const struct net_proto_family irda_family_ops = { 2591static const struct net_proto_family irda_family_ops = {
2467 .family = PF_IRDA, 2592 .family = PF_IRDA,
2468 .create = irda_create, 2593 .create = irda_create,
2469 .owner = THIS_MODULE, 2594 .owner = THIS_MODULE,
2470}; 2595};
2471 2596
2472static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = { 2597static const struct proto_ops irda_stream_ops = {
2473 .family = PF_IRDA, 2598 .family = PF_IRDA,
2474 .owner = THIS_MODULE, 2599 .owner = THIS_MODULE,
2475 .release = irda_release, 2600 .release = irda_release,
@@ -2493,7 +2618,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = {
2493 .sendpage = sock_no_sendpage, 2618 .sendpage = sock_no_sendpage,
2494}; 2619};
2495 2620
2496static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = { 2621static const struct proto_ops irda_seqpacket_ops = {
2497 .family = PF_IRDA, 2622 .family = PF_IRDA,
2498 .owner = THIS_MODULE, 2623 .owner = THIS_MODULE,
2499 .release = irda_release, 2624 .release = irda_release,
@@ -2502,7 +2627,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
2502 .socketpair = sock_no_socketpair, 2627 .socketpair = sock_no_socketpair,
2503 .accept = irda_accept, 2628 .accept = irda_accept,
2504 .getname = irda_getname, 2629 .getname = irda_getname,
2505 .poll = datagram_poll, 2630 .poll = irda_datagram_poll,
2506 .ioctl = irda_ioctl, 2631 .ioctl = irda_ioctl,
2507#ifdef CONFIG_COMPAT 2632#ifdef CONFIG_COMPAT
2508 .compat_ioctl = irda_compat_ioctl, 2633 .compat_ioctl = irda_compat_ioctl,
@@ -2517,7 +2642,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
2517 .sendpage = sock_no_sendpage, 2642 .sendpage = sock_no_sendpage,
2518}; 2643};
2519 2644
2520static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = { 2645static const struct proto_ops irda_dgram_ops = {
2521 .family = PF_IRDA, 2646 .family = PF_IRDA,
2522 .owner = THIS_MODULE, 2647 .owner = THIS_MODULE,
2523 .release = irda_release, 2648 .release = irda_release,
@@ -2526,7 +2651,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
2526 .socketpair = sock_no_socketpair, 2651 .socketpair = sock_no_socketpair,
2527 .accept = irda_accept, 2652 .accept = irda_accept,
2528 .getname = irda_getname, 2653 .getname = irda_getname,
2529 .poll = datagram_poll, 2654 .poll = irda_datagram_poll,
2530 .ioctl = irda_ioctl, 2655 .ioctl = irda_ioctl,
2531#ifdef CONFIG_COMPAT 2656#ifdef CONFIG_COMPAT
2532 .compat_ioctl = irda_compat_ioctl, 2657 .compat_ioctl = irda_compat_ioctl,
@@ -2542,7 +2667,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
2542}; 2667};
2543 2668
2544#ifdef CONFIG_IRDA_ULTRA 2669#ifdef CONFIG_IRDA_ULTRA
2545static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = { 2670static const struct proto_ops irda_ultra_ops = {
2546 .family = PF_IRDA, 2671 .family = PF_IRDA,
2547 .owner = THIS_MODULE, 2672 .owner = THIS_MODULE,
2548 .release = irda_release, 2673 .release = irda_release,
@@ -2551,7 +2676,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
2551 .socketpair = sock_no_socketpair, 2676 .socketpair = sock_no_socketpair,
2552 .accept = sock_no_accept, 2677 .accept = sock_no_accept,
2553 .getname = irda_getname, 2678 .getname = irda_getname,
2554 .poll = datagram_poll, 2679 .poll = irda_datagram_poll,
2555 .ioctl = irda_ioctl, 2680 .ioctl = irda_ioctl,
2556#ifdef CONFIG_COMPAT 2681#ifdef CONFIG_COMPAT
2557 .compat_ioctl = irda_compat_ioctl, 2682 .compat_ioctl = irda_compat_ioctl,
@@ -2567,13 +2692,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
2567}; 2692};
2568#endif /* CONFIG_IRDA_ULTRA */ 2693#endif /* CONFIG_IRDA_ULTRA */
2569 2694
2570SOCKOPS_WRAP(irda_stream, PF_IRDA);
2571SOCKOPS_WRAP(irda_seqpacket, PF_IRDA);
2572SOCKOPS_WRAP(irda_dgram, PF_IRDA);
2573#ifdef CONFIG_IRDA_ULTRA
2574SOCKOPS_WRAP(irda_ultra, PF_IRDA);
2575#endif /* CONFIG_IRDA_ULTRA */
2576
2577/* 2695/*
2578 * Function irsock_init (pro) 2696 * Function irsock_init (pro)
2579 * 2697 *
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index eafc010907c2..3c1754023022 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -30,6 +30,7 @@
30 ********************************************************************/ 30 ********************************************************************/
31 31
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h>
33 34
34#include <net/irda/irda.h> 35#include <net/irda/irda.h>
35#include <net/irda/irlmp.h> 36#include <net/irda/irlmp.h>
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 62116829b817..315ead3cb926 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -30,6 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/sched.h>
33#include <linux/seq_file.h> 34#include <linux/seq_file.h>
34#include <linux/random.h> 35#include <linux/random.h>
35#include <linux/netdevice.h> 36#include <linux/netdevice.h>
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 7b6b631f647f..d340110f5c0c 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -30,6 +30,7 @@
30#include <linux/inetdevice.h> 30#include <linux/inetdevice.h>
31#include <linux/if_arp.h> 31#include <linux/if_arp.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/sched.h>
33#include <net/arp.h> 34#include <net/arp.h>
34 35
35#include <net/irda/irda.h> 36#include <net/irda/irda.h>
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index cf9a4b531a98..b26dee784aba 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include "irnet_irda.h" /* Private header */ 11#include "irnet_irda.h" /* Private header */
12#include <linux/sched.h>
12#include <linux/seq_file.h> 13#include <linux/seq_file.h>
13#include <asm/unaligned.h> 14#include <asm/unaligned.h>
14 15
@@ -1402,8 +1403,8 @@ irnet_connect_indication(void * instance,
1402 /* Socket already connecting ? On primary ? */ 1403 /* Socket already connecting ? On primary ? */
1403 if(0 1404 if(0
1404#ifdef ALLOW_SIMULT_CONNECT 1405#ifdef ALLOW_SIMULT_CONNECT
1405 || ((irttp_is_primary(server->tsap) == 1) /* primary */ 1406 || ((irttp_is_primary(server->tsap) == 1) && /* primary */
1406 && (test_and_clear_bit(0, &new->ttp_connect))) 1407 (test_and_clear_bit(0, &new->ttp_connect)))
1407#endif /* ALLOW_SIMULT_CONNECT */ 1408#endif /* ALLOW_SIMULT_CONNECT */
1408 ) 1409 )
1409 { 1410 {
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 68cbcb19cbd8..7dea882dbb75 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -13,6 +13,7 @@
13 * 2) as a control channel (write commands, read events) 13 * 2) as a control channel (write commands, read events)
14 */ 14 */
15 15
16#include <linux/sched.h>
16#include <linux/smp_lock.h> 17#include <linux/smp_lock.h>
17#include "irnet_ppp.h" /* Private header */ 18#include "irnet_ppp.h" /* Private header */
18/* Please put other headers in irnet.h - Thanks */ 19/* Please put other headers in irnet.h - Thanks */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 3aebabb158a8..1e428863574f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -481,7 +481,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
481} 481}
482 482
483/* Create an IUCV socket */ 483/* Create an IUCV socket */
484static int iucv_sock_create(struct net *net, struct socket *sock, int protocol) 484static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
485 int kern)
485{ 486{
486 struct sock *sk; 487 struct sock *sk;
487 488
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3973d0e61e56..3b1f5f5f8de7 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1768,7 +1768,6 @@ static void iucv_tasklet_fn(unsigned long ignored)
1768 */ 1768 */
1769static void iucv_work_fn(struct work_struct *work) 1769static void iucv_work_fn(struct work_struct *work)
1770{ 1770{
1771 typedef void iucv_irq_fn(struct iucv_irq_data *);
1772 LIST_HEAD(work_queue); 1771 LIST_HEAD(work_queue);
1773 struct iucv_irq_list *p, *n; 1772 struct iucv_irq_list *p, *n;
1774 1773
@@ -1878,14 +1877,25 @@ int iucv_path_table_empty(void)
1878static int iucv_pm_freeze(struct device *dev) 1877static int iucv_pm_freeze(struct device *dev)
1879{ 1878{
1880 int cpu; 1879 int cpu;
1880 struct iucv_irq_list *p, *n;
1881 int rc = 0; 1881 int rc = 0;
1882 1882
1883#ifdef CONFIG_PM_DEBUG 1883#ifdef CONFIG_PM_DEBUG
1884 printk(KERN_WARNING "iucv_pm_freeze\n"); 1884 printk(KERN_WARNING "iucv_pm_freeze\n");
1885#endif 1885#endif
1886 if (iucv_pm_state != IUCV_PM_FREEZING) {
1887 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
1888 smp_call_function_single(cpu, iucv_block_cpu_almost,
1889 NULL, 1);
1890 cancel_work_sync(&iucv_work);
1891 list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
1892 list_del_init(&p->list);
1893 iucv_sever_pathid(p->data.ippathid,
1894 iucv_error_no_listener);
1895 kfree(p);
1896 }
1897 }
1886 iucv_pm_state = IUCV_PM_FREEZING; 1898 iucv_pm_state = IUCV_PM_FREEZING;
1887 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
1888 smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
1889 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) 1899 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
1890 rc = dev->driver->pm->freeze(dev); 1900 rc = dev->driver->pm->freeze(dev);
1891 if (iucv_path_table_empty()) 1901 if (iucv_path_table_empty())
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 472f6594184a..84209fbbeb17 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -35,7 +35,7 @@
35#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x)) 35#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x))
36#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x)) 36#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x))
37 37
38static int pfkey_net_id; 38static int pfkey_net_id __read_mostly;
39struct netns_pfkey { 39struct netns_pfkey {
40 /* List of all pfkey sockets. */ 40 /* List of all pfkey sockets. */
41 struct hlist_head table; 41 struct hlist_head table;
@@ -177,7 +177,8 @@ static struct proto key_proto = {
177 .obj_size = sizeof(struct pfkey_sock), 177 .obj_size = sizeof(struct pfkey_sock),
178}; 178};
179 179
180static int pfkey_create(struct net *net, struct socket *sock, int protocol) 180static int pfkey_create(struct net *net, struct socket *sock, int protocol,
181 int kern)
181{ 182{
182 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 183 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
183 struct sock *sk; 184 struct sock *sk;
@@ -3764,28 +3765,14 @@ static struct xfrm_mgr pfkeyv2_mgr =
3764 3765
3765static int __net_init pfkey_net_init(struct net *net) 3766static int __net_init pfkey_net_init(struct net *net)
3766{ 3767{
3767 struct netns_pfkey *net_pfkey; 3768 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3768 int rv; 3769 int rv;
3769 3770
3770 net_pfkey = kmalloc(sizeof(struct netns_pfkey), GFP_KERNEL);
3771 if (!net_pfkey) {
3772 rv = -ENOMEM;
3773 goto out_kmalloc;
3774 }
3775 INIT_HLIST_HEAD(&net_pfkey->table); 3771 INIT_HLIST_HEAD(&net_pfkey->table);
3776 atomic_set(&net_pfkey->socks_nr, 0); 3772 atomic_set(&net_pfkey->socks_nr, 0);
3777 rv = net_assign_generic(net, pfkey_net_id, net_pfkey); 3773
3778 if (rv < 0)
3779 goto out_assign;
3780 rv = pfkey_init_proc(net); 3774 rv = pfkey_init_proc(net);
3781 if (rv < 0)
3782 goto out_proc;
3783 return 0;
3784 3775
3785out_proc:
3786out_assign:
3787 kfree(net_pfkey);
3788out_kmalloc:
3789 return rv; 3776 return rv;
3790} 3777}
3791 3778
@@ -3795,17 +3782,18 @@ static void __net_exit pfkey_net_exit(struct net *net)
3795 3782
3796 pfkey_exit_proc(net); 3783 pfkey_exit_proc(net);
3797 BUG_ON(!hlist_empty(&net_pfkey->table)); 3784 BUG_ON(!hlist_empty(&net_pfkey->table));
3798 kfree(net_pfkey);
3799} 3785}
3800 3786
3801static struct pernet_operations pfkey_net_ops = { 3787static struct pernet_operations pfkey_net_ops = {
3802 .init = pfkey_net_init, 3788 .init = pfkey_net_init,
3803 .exit = pfkey_net_exit, 3789 .exit = pfkey_net_exit,
3790 .id = &pfkey_net_id,
3791 .size = sizeof(struct netns_pfkey),
3804}; 3792};
3805 3793
3806static void __exit ipsec_pfkey_exit(void) 3794static void __exit ipsec_pfkey_exit(void)
3807{ 3795{
3808 unregister_pernet_gen_subsys(pfkey_net_id, &pfkey_net_ops); 3796 unregister_pernet_subsys(&pfkey_net_ops);
3809 xfrm_unregister_km(&pfkeyv2_mgr); 3797 xfrm_unregister_km(&pfkeyv2_mgr);
3810 sock_unregister(PF_KEY); 3798 sock_unregister(PF_KEY);
3811 proto_unregister(&key_proto); 3799 proto_unregister(&key_proto);
@@ -3824,7 +3812,7 @@ static int __init ipsec_pfkey_init(void)
3824 err = xfrm_register_km(&pfkeyv2_mgr); 3812 err = xfrm_register_km(&pfkeyv2_mgr);
3825 if (err != 0) 3813 if (err != 0)
3826 goto out_sock_unregister; 3814 goto out_sock_unregister;
3827 err = register_pernet_gen_subsys(&pfkey_net_id, &pfkey_net_ops); 3815 err = register_pernet_subsys(&pfkey_net_ops);
3828 if (err != 0) 3816 if (err != 0)
3829 goto out_xfrm_unregister_km; 3817 goto out_xfrm_unregister_km;
3830out: 3818out:
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 4866b4fb0c27..3a66546cad06 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -140,14 +140,17 @@ static struct proto llc_proto = {
140 140
141/** 141/**
142 * llc_ui_create - alloc and init a new llc_ui socket 142 * llc_ui_create - alloc and init a new llc_ui socket
143 * @net: network namespace (must be default network)
143 * @sock: Socket to initialize and attach allocated sk to. 144 * @sock: Socket to initialize and attach allocated sk to.
144 * @protocol: Unused. 145 * @protocol: Unused.
146 * @kern: on behalf of kernel or userspace
145 * 147 *
146 * Allocate and initialize a new llc_ui socket, validate the user wants a 148 * Allocate and initialize a new llc_ui socket, validate the user wants a
147 * socket type we have available. 149 * socket type we have available.
148 * Returns 0 upon success, negative upon failure. 150 * Returns 0 upon success, negative upon failure.
149 */ 151 */
150static int llc_ui_create(struct net *net, struct socket *sock, int protocol) 152static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
153 int kern)
151{ 154{
152 struct sock *sk; 155 struct sock *sk;
153 int rc = -ESOCKTNOSUPPORT; 156 int rc = -ESOCKTNOSUPPORT;
@@ -155,7 +158,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol)
155 if (!capable(CAP_NET_RAW)) 158 if (!capable(CAP_NET_RAW))
156 return -EPERM; 159 return -EPERM;
157 160
158 if (net != &init_net) 161 if (!net_eq(net, &init_net))
159 return -EAFNOSUPPORT; 162 return -EAFNOSUPPORT;
160 163
161 if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) { 164 if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4d5543af3123..a10d508b07e1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -194,6 +194,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
194 194
195 Do not select this option. 195 Do not select this option.
196 196
197config MAC80211_VERBOSE_MHWMP_DEBUG
198 bool "Verbose mesh HWMP routing debugging"
199 depends on MAC80211_DEBUG_MENU
200 depends on MAC80211_MESH
201 ---help---
202 Selecting this option causes mac80211 to print out very
203 verbose mesh routing (HWMP) debugging messages (when mac80211
204 is taking part in a mesh network).
205 It should not be selected on production systems as those
206 messages are remotely triggerable.
207
208 Do not select this option.
209
197config MAC80211_DEBUG_COUNTERS 210config MAC80211_DEBUG_COUNTERS
198 bool "Extra statistics for TX/RX debugging" 211 bool "Extra statistics for TX/RX debugging"
199 depends on MAC80211_DEBUG_MENU 212 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 9f3cf7129324..298cfcc1bf8d 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o
2 2
3# mac80211 objects 3# mac80211 objects
4mac80211-y := \ 4mac80211-y := \
5 main.o \ 5 main.o status.o \
6 sta_info.o \ 6 sta_info.o \
7 wep.o \ 7 wep.o \
8 wpa.o \ 8 wpa.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bc064d7933ff..51c7dc3c4c3b 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,7 +41,8 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (drv_ampdu_action(local, IEEE80211_AMPDU_RX_STOP, 44 if (drv_ampdu_action(local, &sta->sdata->vif,
45 IEEE80211_AMPDU_RX_STOP,
45 &sta->sta, tid, NULL)) 46 &sta->sta, tid, NULL))
46 printk(KERN_DEBUG "HW problem - can not stop rx " 47 printk(KERN_DEBUG "HW problem - can not stop rx "
47 "aggregation for tid %d\n", tid); 48 "aggregation for tid %d\n", tid);
@@ -85,10 +86,6 @@ void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *r
85 struct ieee80211_local *local = sdata->local; 86 struct ieee80211_local *local = sdata->local;
86 struct sta_info *sta; 87 struct sta_info *sta;
87 88
88 /* stop HW Rx aggregation. ampdu_action existence
89 * already verified in session init so we add the BUG_ON */
90 BUG_ON(!local->ops->ampdu_action);
91
92 rcu_read_lock(); 89 rcu_read_lock();
93 90
94 sta = sta_info_get(local, ra); 91 sta = sta_info_get(local, ra);
@@ -170,7 +167,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
170 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); 167 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
171 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); 168 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
172 169
173 ieee80211_tx_skb(sdata, skb, 1); 170 ieee80211_tx_skb(sdata, skb);
174} 171}
175 172
176void ieee80211_process_addba_request(struct ieee80211_local *local, 173void ieee80211_process_addba_request(struct ieee80211_local *local,
@@ -210,9 +207,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
210 * check if configuration can support the BA policy 207 * check if configuration can support the BA policy
211 * and if buffer size does not exceeds max value */ 208 * and if buffer size does not exceeds max value */
212 /* XXX: check own ht delayed BA capability?? */ 209 /* XXX: check own ht delayed BA capability?? */
213 if (((ba_policy != 1) 210 if (((ba_policy != 1) &&
214 && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) 211 (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
215 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { 212 (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
216 status = WLAN_STATUS_INVALID_QOS_PARAM; 213 status = WLAN_STATUS_INVALID_QOS_PARAM;
217#ifdef CONFIG_MAC80211_HT_DEBUG 214#ifdef CONFIG_MAC80211_HT_DEBUG
218 if (net_ratelimit()) 215 if (net_ratelimit())
@@ -284,7 +281,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
284 goto end; 281 goto end;
285 } 282 }
286 283
287 ret = drv_ampdu_action(local, IEEE80211_AMPDU_RX_START, 284 ret = drv_ampdu_action(local, &sta->sdata->vif,
285 IEEE80211_AMPDU_RX_START,
288 &sta->sta, tid, &start_seq_num); 286 &sta->sta, tid, &start_seq_num);
289#ifdef CONFIG_MAC80211_HT_DEBUG 287#ifdef CONFIG_MAC80211_HT_DEBUG
290 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 288 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index bd765f30dba2..5e3a7eccef5a 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -91,7 +91,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
91 mgmt->u.action.u.addba_req.start_seq_num = 91 mgmt->u.action.u.addba_req.start_seq_num =
92 cpu_to_le16(start_seq_num << 4); 92 cpu_to_le16(start_seq_num << 4);
93 93
94 ieee80211_tx_skb(sdata, skb, 1); 94 ieee80211_tx_skb(sdata, skb);
95} 95}
96 96
97void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) 97void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
@@ -120,16 +120,22 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
120 bar->control = cpu_to_le16(bar_control); 120 bar->control = cpu_to_le16(bar_control);
121 bar->start_seq_num = cpu_to_le16(ssn); 121 bar->start_seq_num = cpu_to_le16(ssn);
122 122
123 ieee80211_tx_skb(sdata, skb, 0); 123 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
124 ieee80211_tx_skb(sdata, skb);
124} 125}
125 126
126static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 127int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
127 enum ieee80211_back_parties initiator) 128 enum ieee80211_back_parties initiator)
128{ 129{
129 struct ieee80211_local *local = sta->local; 130 struct ieee80211_local *local = sta->local;
130 int ret; 131 int ret;
131 u8 *state; 132 u8 *state;
132 133
134#ifdef CONFIG_MAC80211_HT_DEBUG
135 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
136 sta->sta.addr, tid);
137#endif /* CONFIG_MAC80211_HT_DEBUG */
138
133 state = &sta->ampdu_mlme.tid_state_tx[tid]; 139 state = &sta->ampdu_mlme.tid_state_tx[tid];
134 140
135 if (*state == HT_AGG_STATE_OPERATIONAL) 141 if (*state == HT_AGG_STATE_OPERATIONAL)
@@ -138,12 +144,12 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
138 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
139 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
140 146
141 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_STOP, 147 ret = drv_ampdu_action(local, &sta->sdata->vif,
148 IEEE80211_AMPDU_TX_STOP,
142 &sta->sta, tid, NULL); 149 &sta->sta, tid, NULL);
143 150
144 /* HW shall not deny going back to legacy */ 151 /* HW shall not deny going back to legacy */
145 if (WARN_ON(ret)) { 152 if (WARN_ON(ret)) {
146 *state = HT_AGG_STATE_OPERATIONAL;
147 /* 153 /*
148 * We may have pending packets get stuck in this case... 154 * We may have pending packets get stuck in this case...
149 * Not bothering with a workaround for now. 155 * Not bothering with a workaround for now.
@@ -173,12 +179,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
173 179
174 /* check if the TID waits for addBA response */ 180 /* check if the TID waits for addBA response */
175 spin_lock_bh(&sta->lock); 181 spin_lock_bh(&sta->lock);
176 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
183 HT_ADDBA_REQUESTED_MSK) {
177 spin_unlock_bh(&sta->lock); 184 spin_unlock_bh(&sta->lock);
178 *state = HT_AGG_STATE_IDLE; 185 *state = HT_AGG_STATE_IDLE;
179#ifdef CONFIG_MAC80211_HT_DEBUG 186#ifdef CONFIG_MAC80211_HT_DEBUG
180 printk(KERN_DEBUG "timer expired on tid %d but we are not " 187 printk(KERN_DEBUG "timer expired on tid %d but we are not "
181 "expecting addBA response there", tid); 188 "(or no longer) expecting addBA response there",
189 tid);
182#endif 190#endif
183 return; 191 return;
184 } 192 }
@@ -196,11 +204,11 @@ static inline int ieee80211_ac_from_tid(int tid)
196 return ieee802_1d_to_ac[tid & 7]; 204 return ieee802_1d_to_ac[tid & 7];
197} 205}
198 206
199int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) 207int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
200{ 208{
201 struct ieee80211_local *local = hw_to_local(hw); 209 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
202 struct sta_info *sta; 210 struct ieee80211_sub_if_data *sdata = sta->sdata;
203 struct ieee80211_sub_if_data *sdata; 211 struct ieee80211_local *local = sdata->local;
204 u8 *state; 212 u8 *state;
205 int ret = 0; 213 int ret = 0;
206 u16 start_seq_num; 214 u16 start_seq_num;
@@ -208,52 +216,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
208 if (WARN_ON(!local->ops->ampdu_action)) 216 if (WARN_ON(!local->ops->ampdu_action))
209 return -EINVAL; 217 return -EINVAL;
210 218
211 if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) 219 if ((tid >= STA_TID_NUM) ||
220 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
212 return -EINVAL; 221 return -EINVAL;
213 222
214#ifdef CONFIG_MAC80211_HT_DEBUG 223#ifdef CONFIG_MAC80211_HT_DEBUG
215 printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n", 224 printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
216 ra, tid); 225 pubsta->addr, tid);
217#endif /* CONFIG_MAC80211_HT_DEBUG */ 226#endif /* CONFIG_MAC80211_HT_DEBUG */
218 227
219 rcu_read_lock();
220
221 sta = sta_info_get(local, ra);
222 if (!sta) {
223#ifdef CONFIG_MAC80211_HT_DEBUG
224 printk(KERN_DEBUG "Could not find the station\n");
225#endif
226 ret = -ENOENT;
227 goto unlock;
228 }
229
230 /* 228 /*
231 * The aggregation code is not prepared to handle 229 * The aggregation code is not prepared to handle
232 * anything but STA/AP due to the BSSID handling. 230 * anything but STA/AP due to the BSSID handling.
233 * IBSS could work in the code but isn't supported 231 * IBSS could work in the code but isn't supported
234 * by drivers or the standard. 232 * by drivers or the standard.
235 */ 233 */
236 if (sta->sdata->vif.type != NL80211_IFTYPE_STATION && 234 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
237 sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 235 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
238 sta->sdata->vif.type != NL80211_IFTYPE_AP) { 236 sdata->vif.type != NL80211_IFTYPE_AP)
239 ret = -EINVAL; 237 return -EINVAL;
240 goto unlock;
241 }
242 238
243 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
244#ifdef CONFIG_MAC80211_HT_DEBUG 240#ifdef CONFIG_MAC80211_HT_DEBUG
245 printk(KERN_DEBUG "Suspend in progress. " 241 printk(KERN_DEBUG "Suspend in progress. "
246 "Denying BA session request\n"); 242 "Denying BA session request\n");
247#endif 243#endif
248 ret = -EINVAL; 244 return -EINVAL;
249 goto unlock;
250 } 245 }
251 246
252 spin_lock_bh(&sta->lock); 247 spin_lock_bh(&sta->lock);
253 spin_lock(&local->ampdu_lock); 248 spin_lock(&local->ampdu_lock);
254 249
255 sdata = sta->sdata;
256
257 /* we have tried too many times, receiver does not want A-MPDU */ 250 /* we have tried too many times, receiver does not want A-MPDU */
258 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { 251 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
259 ret = -EBUSY; 252 ret = -EBUSY;
@@ -310,8 +303,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
310 303
311 start_seq_num = sta->tid_seq[tid]; 304 start_seq_num = sta->tid_seq[tid];
312 305
313 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_START, 306 ret = drv_ampdu_action(local, &sdata->vif,
314 &sta->sta, tid, &start_seq_num); 307 IEEE80211_AMPDU_TX_START,
308 pubsta, tid, &start_seq_num);
315 309
316 if (ret) { 310 if (ret) {
317#ifdef CONFIG_MAC80211_HT_DEBUG 311#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -336,7 +330,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
336 sta->ampdu_mlme.dialog_token_allocator; 330 sta->ampdu_mlme.dialog_token_allocator;
337 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 331 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
338 332
339 ieee80211_send_addba_request(sta->sdata, ra, tid, 333 ieee80211_send_addba_request(sdata, pubsta->addr, tid,
340 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 334 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
341 sta->ampdu_mlme.tid_tx[tid]->ssn, 335 sta->ampdu_mlme.tid_tx[tid]->ssn,
342 0x40, 5000); 336 0x40, 5000);
@@ -348,7 +342,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
348#ifdef CONFIG_MAC80211_HT_DEBUG 342#ifdef CONFIG_MAC80211_HT_DEBUG
349 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); 343 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
350#endif 344#endif
351 goto unlock; 345 return 0;
352 346
353 err_free: 347 err_free:
354 kfree(sta->ampdu_mlme.tid_tx[tid]); 348 kfree(sta->ampdu_mlme.tid_tx[tid]);
@@ -360,8 +354,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
360 err_unlock_sta: 354 err_unlock_sta:
361 spin_unlock(&local->ampdu_lock); 355 spin_unlock(&local->ampdu_lock);
362 spin_unlock_bh(&sta->lock); 356 spin_unlock_bh(&sta->lock);
363 unlock:
364 rcu_read_unlock();
365 return ret; 357 return ret;
366} 358}
367EXPORT_SYMBOL(ieee80211_start_tx_ba_session); 359EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
@@ -428,13 +420,15 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
428 ieee80211_agg_splice_finish(local, sta, tid); 420 ieee80211_agg_splice_finish(local, sta, tid);
429 spin_unlock(&local->ampdu_lock); 421 spin_unlock(&local->ampdu_lock);
430 422
431 drv_ampdu_action(local, IEEE80211_AMPDU_TX_OPERATIONAL, 423 drv_ampdu_action(local, &sta->sdata->vif,
424 IEEE80211_AMPDU_TX_OPERATIONAL,
432 &sta->sta, tid, NULL); 425 &sta->sta, tid, NULL);
433} 426}
434 427
435void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) 428void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
436{ 429{
437 struct ieee80211_local *local = hw_to_local(hw); 430 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
431 struct ieee80211_local *local = sdata->local;
438 struct sta_info *sta; 432 struct sta_info *sta;
439 u8 *state; 433 u8 *state;
440 434
@@ -483,10 +477,11 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
483} 477}
484EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); 478EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
485 479
486void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, 480void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
487 const u8 *ra, u16 tid) 481 const u8 *ra, u16 tid)
488{ 482{
489 struct ieee80211_local *local = hw_to_local(hw); 483 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
484 struct ieee80211_local *local = sdata->local;
490 struct ieee80211_ra_tid *ra_tid; 485 struct ieee80211_ra_tid *ra_tid;
491 struct sk_buff *skb = dev_alloc_skb(0); 486 struct sk_buff *skb = dev_alloc_skb(0);
492 487
@@ -501,6 +496,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
501 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 496 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
502 memcpy(&ra_tid->ra, ra, ETH_ALEN); 497 memcpy(&ra_tid->ra, ra, ETH_ALEN);
503 ra_tid->tid = tid; 498 ra_tid->tid = tid;
499 ra_tid->vif = vif;
504 500
505 skb->pkt_type = IEEE80211_ADDBA_MSG; 501 skb->pkt_type = IEEE80211_ADDBA_MSG;
506 skb_queue_tail(&local->skb_queue, skb); 502 skb_queue_tail(&local->skb_queue, skb);
@@ -523,11 +519,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
523 goto unlock; 519 goto unlock;
524 } 520 }
525 521
526#ifdef CONFIG_MAC80211_HT_DEBUG
527 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
528 sta->sta.addr, tid);
529#endif /* CONFIG_MAC80211_HT_DEBUG */
530
531 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); 522 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
532 523
533 unlock: 524 unlock:
@@ -535,36 +526,27 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
535 return ret; 526 return ret;
536} 527}
537 528
538int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, 529int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
539 u8 *ra, u16 tid,
540 enum ieee80211_back_parties initiator) 530 enum ieee80211_back_parties initiator)
541{ 531{
542 struct ieee80211_local *local = hw_to_local(hw); 532 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
543 struct sta_info *sta; 533 struct ieee80211_sub_if_data *sdata = sta->sdata;
544 int ret = 0; 534 struct ieee80211_local *local = sdata->local;
545 535
546 if (WARN_ON(!local->ops->ampdu_action)) 536 if (!local->ops->ampdu_action)
547 return -EINVAL; 537 return -EINVAL;
548 538
549 if (tid >= STA_TID_NUM) 539 if (tid >= STA_TID_NUM)
550 return -EINVAL; 540 return -EINVAL;
551 541
552 rcu_read_lock(); 542 return __ieee80211_stop_tx_ba_session(sta, tid, initiator);
553 sta = sta_info_get(local, ra);
554 if (!sta) {
555 rcu_read_unlock();
556 return -ENOENT;
557 }
558
559 ret = __ieee80211_stop_tx_ba_session(sta, tid, initiator);
560 rcu_read_unlock();
561 return ret;
562} 543}
563EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 544EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
564 545
565void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) 546void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
566{ 547{
567 struct ieee80211_local *local = hw_to_local(hw); 548 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
549 struct ieee80211_local *local = sdata->local;
568 struct sta_info *sta; 550 struct sta_info *sta;
569 u8 *state; 551 u8 *state;
570 552
@@ -627,10 +609,11 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
627} 609}
628EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); 610EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
629 611
630void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, 612void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
631 const u8 *ra, u16 tid) 613 const u8 *ra, u16 tid)
632{ 614{
633 struct ieee80211_local *local = hw_to_local(hw); 615 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
616 struct ieee80211_local *local = sdata->local;
634 struct ieee80211_ra_tid *ra_tid; 617 struct ieee80211_ra_tid *ra_tid;
635 struct sk_buff *skb = dev_alloc_skb(0); 618 struct sk_buff *skb = dev_alloc_skb(0);
636 619
@@ -645,6 +628,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
645 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 628 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
646 memcpy(&ra_tid->ra, ra, ETH_ALEN); 629 memcpy(&ra_tid->ra, ra, ETH_ALEN);
647 ra_tid->tid = tid; 630 ra_tid->tid = tid;
631 ra_tid->vif = vif;
648 632
649 skb->pkt_type = IEEE80211_DELBA_MSG; 633 skb->pkt_type = IEEE80211_DELBA_MSG;
650 skb_queue_tail(&local->skb_queue, skb); 634 skb_queue_tail(&local->skb_queue, skb);
@@ -668,24 +652,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
668 652
669 spin_lock_bh(&sta->lock); 653 spin_lock_bh(&sta->lock);
670 654
671 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 655 if (!(*state & HT_ADDBA_REQUESTED_MSK))
672 spin_unlock_bh(&sta->lock); 656 goto out;
673 return;
674 }
675 657
676 if (mgmt->u.action.u.addba_resp.dialog_token != 658 if (mgmt->u.action.u.addba_resp.dialog_token !=
677 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 659 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
678 spin_unlock_bh(&sta->lock);
679#ifdef CONFIG_MAC80211_HT_DEBUG 660#ifdef CONFIG_MAC80211_HT_DEBUG
680 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 661 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
681#endif /* CONFIG_MAC80211_HT_DEBUG */ 662#endif /* CONFIG_MAC80211_HT_DEBUG */
682 return; 663 goto out;
683 } 664 }
684 665
685 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 666 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
667
686#ifdef CONFIG_MAC80211_HT_DEBUG 668#ifdef CONFIG_MAC80211_HT_DEBUG
687 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); 669 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
688#endif /* CONFIG_MAC80211_HT_DEBUG */ 670#endif /* CONFIG_MAC80211_HT_DEBUG */
671
689 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 672 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
690 == WLAN_STATUS_SUCCESS) { 673 == WLAN_STATUS_SUCCESS) {
691 u8 curstate = *state; 674 u8 curstate = *state;
@@ -699,5 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
699 } else { 682 } else {
700 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 683 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
701 } 684 }
685
686 out:
702 spin_unlock_bh(&sta->lock); 687 spin_unlock_bh(&sta->lock);
703} 688}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5608f6c68413..93ee1fd5c08d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -36,6 +36,15 @@ static bool nl80211_type_check(enum nl80211_iftype type)
36 } 36 }
37} 37}
38 38
39static bool nl80211_params_check(enum nl80211_iftype type,
40 struct vif_params *params)
41{
42 if (!nl80211_type_check(type))
43 return false;
44
45 return true;
46}
47
39static int ieee80211_add_iface(struct wiphy *wiphy, char *name, 48static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
40 enum nl80211_iftype type, u32 *flags, 49 enum nl80211_iftype type, u32 *flags,
41 struct vif_params *params) 50 struct vif_params *params)
@@ -45,7 +54,7 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
45 struct ieee80211_sub_if_data *sdata; 54 struct ieee80211_sub_if_data *sdata;
46 int err; 55 int err;
47 56
48 if (!nl80211_type_check(type)) 57 if (!nl80211_params_check(type, params))
49 return -EINVAL; 58 return -EINVAL;
50 59
51 err = ieee80211_if_add(local, name, &dev, type, params); 60 err = ieee80211_if_add(local, name, &dev, type, params);
@@ -72,7 +81,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
72 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata;
73 int ret; 82 int ret;
74 83
75 if (!nl80211_type_check(type)) 84 if (netif_running(dev))
85 return -EBUSY;
86
87 if (!nl80211_params_check(type, params))
76 return -EINVAL; 88 return -EINVAL;
77 89
78 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 90 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -81,9 +93,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
81 if (ret) 93 if (ret)
82 return ret; 94 return ret;
83 95
84 if (netif_running(sdata->dev))
85 return -EBUSY;
86
87 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) 96 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
88 ieee80211_sdata_set_mesh_id(sdata, 97 ieee80211_sdata_set_mesh_id(sdata,
89 params->mesh_id_len, 98 params->mesh_id_len,
@@ -92,6 +101,13 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
92 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) 101 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
93 return 0; 102 return 0;
94 103
104 if (type == NL80211_IFTYPE_AP_VLAN &&
105 params && params->use_4addr == 0)
106 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
107 else if (type == NL80211_IFTYPE_STATION &&
108 params && params->use_4addr >= 0)
109 sdata->u.mgd.use_4addr = params->use_4addr;
110
95 sdata->u.mntr_flags = *flags; 111 sdata->u.mntr_flags = *flags;
96 return 0; 112 return 0;
97} 113}
@@ -377,13 +393,13 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
377static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, 393static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
378 int idx, u8 *mac, struct station_info *sinfo) 394 int idx, u8 *mac, struct station_info *sinfo)
379{ 395{
380 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 396 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
381 struct sta_info *sta; 397 struct sta_info *sta;
382 int ret = -ENOENT; 398 int ret = -ENOENT;
383 399
384 rcu_read_lock(); 400 rcu_read_lock();
385 401
386 sta = sta_info_get_by_idx(local, idx, dev); 402 sta = sta_info_get_by_idx(sdata, idx);
387 if (sta) { 403 if (sta) {
388 ret = 0; 404 ret = 0;
389 memcpy(mac, sta->sta.addr, ETH_ALEN); 405 memcpy(mac, sta->sta.addr, ETH_ALEN);
@@ -738,13 +754,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
738 754
739 err = sta_info_insert(sta); 755 err = sta_info_insert(sta);
740 if (err) { 756 if (err) {
741 /* STA has been freed */
742 if (err == -EEXIST && layer2_update) {
743 /* Need to update layer 2 devices on reassociation */
744 sta = sta_info_get(local, mac);
745 if (sta)
746 ieee80211_send_layer2_update(sta);
747 }
748 rcu_read_unlock(); 757 rcu_read_unlock();
749 return err; 758 return err;
750 } 759 }
@@ -813,6 +822,15 @@ static int ieee80211_change_station(struct wiphy *wiphy,
813 return -EINVAL; 822 return -EINVAL;
814 } 823 }
815 824
825 if (params->vlan->ieee80211_ptr->use_4addr) {
826 if (vlansdata->u.vlan.sta) {
827 rcu_read_unlock();
828 return -EBUSY;
829 }
830
831 rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
832 }
833
816 sta->sdata = vlansdata; 834 sta->sdata = vlansdata;
817 ieee80211_send_layer2_update(sta); 835 ieee80211_send_layer2_update(sta);
818 } 836 }
@@ -914,7 +932,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
914 pinfo->generation = mesh_paths_generation; 932 pinfo->generation = mesh_paths_generation;
915 933
916 pinfo->filled = MPATH_INFO_FRAME_QLEN | 934 pinfo->filled = MPATH_INFO_FRAME_QLEN |
917 MPATH_INFO_DSN | 935 MPATH_INFO_SN |
918 MPATH_INFO_METRIC | 936 MPATH_INFO_METRIC |
919 MPATH_INFO_EXPTIME | 937 MPATH_INFO_EXPTIME |
920 MPATH_INFO_DISCOVERY_TIMEOUT | 938 MPATH_INFO_DISCOVERY_TIMEOUT |
@@ -922,7 +940,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
922 MPATH_INFO_FLAGS; 940 MPATH_INFO_FLAGS;
923 941
924 pinfo->frame_qlen = mpath->frame_queue.qlen; 942 pinfo->frame_qlen = mpath->frame_queue.qlen;
925 pinfo->dsn = mpath->dsn; 943 pinfo->sn = mpath->sn;
926 pinfo->metric = mpath->metric; 944 pinfo->metric = mpath->metric;
927 if (time_before(jiffies, mpath->exp_time)) 945 if (time_before(jiffies, mpath->exp_time))
928 pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); 946 pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies);
@@ -934,8 +952,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
934 pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; 952 pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
935 if (mpath->flags & MESH_PATH_RESOLVING) 953 if (mpath->flags & MESH_PATH_RESOLVING)
936 pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; 954 pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
937 if (mpath->flags & MESH_PATH_DSN_VALID) 955 if (mpath->flags & MESH_PATH_SN_VALID)
938 pinfo->flags |= NL80211_MPATH_FLAG_DSN_VALID; 956 pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID;
939 if (mpath->flags & MESH_PATH_FIXED) 957 if (mpath->flags & MESH_PATH_FIXED)
940 pinfo->flags |= NL80211_MPATH_FLAG_FIXED; 958 pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
941 if (mpath->flags & MESH_PATH_RESOLVING) 959 if (mpath->flags & MESH_PATH_RESOLVING)
@@ -1008,7 +1026,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
1008{ 1026{
1009 struct mesh_config *conf; 1027 struct mesh_config *conf;
1010 struct ieee80211_sub_if_data *sdata; 1028 struct ieee80211_sub_if_data *sdata;
1029 struct ieee80211_if_mesh *ifmsh;
1030
1011 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1031 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1032 ifmsh = &sdata->u.mesh;
1012 1033
1013 /* Set the config options which we are interested in setting */ 1034 /* Set the config options which we are interested in setting */
1014 conf = &(sdata->u.mesh.mshcfg); 1035 conf = &(sdata->u.mesh.mshcfg);
@@ -1043,6 +1064,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
1043 mask)) 1064 mask))
1044 conf->dot11MeshHWMPnetDiameterTraversalTime = 1065 conf->dot11MeshHWMPnetDiameterTraversalTime =
1045 nconf->dot11MeshHWMPnetDiameterTraversalTime; 1066 nconf->dot11MeshHWMPnetDiameterTraversalTime;
1067 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) {
1068 conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
1069 ieee80211_mesh_root_setup(ifmsh);
1070 }
1046 return 0; 1071 return 0;
1047} 1072}
1048 1073
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 96991b68f048..e4b54093d41b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -1,3 +1,4 @@
1
1/* 2/*
2 * mac80211 debugfs for wireless PHYs 3 * mac80211 debugfs for wireless PHYs
3 * 4 *
@@ -38,16 +39,10 @@ static const struct file_operations name## _ops = { \
38}; 39};
39 40
40#define DEBUGFS_ADD(name) \ 41#define DEBUGFS_ADD(name) \
41 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \ 42 debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
42 local, &name## _ops);
43 43
44#define DEBUGFS_ADD_MODE(name, mode) \ 44#define DEBUGFS_ADD_MODE(name, mode) \
45 local->debugfs.name = debugfs_create_file(#name, mode, phyd, \ 45 debugfs_create_file(#name, mode, phyd, local, &name## _ops);
46 local, &name## _ops);
47
48#define DEBUGFS_DEL(name) \
49 debugfs_remove(local->debugfs.name); \
50 local->debugfs.name = NULL;
51 46
52 47
53DEBUGFS_READONLY_FILE(frequency, 20, "%d", 48DEBUGFS_READONLY_FILE(frequency, 20, "%d",
@@ -57,7 +52,7 @@ DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
57DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", 52DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
58 local->wep_iv & 0xffffff); 53 local->wep_iv & 0xffffff);
59DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 54DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
60 local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>"); 55 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
61 56
62static ssize_t tsf_read(struct file *file, char __user *user_buf, 57static ssize_t tsf_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos) 58 size_t count, loff_t *ppos)
@@ -233,12 +228,7 @@ static const struct file_operations stats_ ##name## _ops = { \
233}; 228};
234 229
235#define DEBUGFS_STATS_ADD(name) \ 230#define DEBUGFS_STATS_ADD(name) \
236 local->debugfs.stats.name = debugfs_create_file(#name, 0400, statsd,\ 231 debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
237 local, &stats_ ##name## _ops);
238
239#define DEBUGFS_STATS_DEL(name) \
240 debugfs_remove(local->debugfs.stats.name); \
241 local->debugfs.stats.name = NULL;
242 232
243DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", 233DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u",
244 local->dot11TransmittedFragmentCount); 234 local->dot11TransmittedFragmentCount);
@@ -326,7 +316,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
326 DEBUGFS_ADD(noack); 316 DEBUGFS_ADD(noack);
327 317
328 statsd = debugfs_create_dir("statistics", phyd); 318 statsd = debugfs_create_dir("statistics", phyd);
329 local->debugfs.statistics = statsd;
330 319
331 /* if the dir failed, don't put all the other things into the root! */ 320 /* if the dir failed, don't put all the other things into the root! */
332 if (!statsd) 321 if (!statsd)
@@ -367,57 +356,3 @@ void debugfs_hw_add(struct ieee80211_local *local)
367 DEBUGFS_STATS_ADD(dot11FCSErrorCount); 356 DEBUGFS_STATS_ADD(dot11FCSErrorCount);
368 DEBUGFS_STATS_ADD(dot11RTSSuccessCount); 357 DEBUGFS_STATS_ADD(dot11RTSSuccessCount);
369} 358}
370
371void debugfs_hw_del(struct ieee80211_local *local)
372{
373 DEBUGFS_DEL(frequency);
374 DEBUGFS_DEL(total_ps_buffered);
375 DEBUGFS_DEL(wep_iv);
376 DEBUGFS_DEL(tsf);
377 DEBUGFS_DEL(queues);
378 DEBUGFS_DEL(reset);
379 DEBUGFS_DEL(noack);
380
381 DEBUGFS_STATS_DEL(transmitted_fragment_count);
382 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
383 DEBUGFS_STATS_DEL(failed_count);
384 DEBUGFS_STATS_DEL(retry_count);
385 DEBUGFS_STATS_DEL(multiple_retry_count);
386 DEBUGFS_STATS_DEL(frame_duplicate_count);
387 DEBUGFS_STATS_DEL(received_fragment_count);
388 DEBUGFS_STATS_DEL(multicast_received_frame_count);
389 DEBUGFS_STATS_DEL(transmitted_frame_count);
390 DEBUGFS_STATS_DEL(num_scans);
391#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
392 DEBUGFS_STATS_DEL(tx_handlers_drop);
393 DEBUGFS_STATS_DEL(tx_handlers_queued);
394 DEBUGFS_STATS_DEL(tx_handlers_drop_unencrypted);
395 DEBUGFS_STATS_DEL(tx_handlers_drop_fragment);
396 DEBUGFS_STATS_DEL(tx_handlers_drop_wep);
397 DEBUGFS_STATS_DEL(tx_handlers_drop_not_assoc);
398 DEBUGFS_STATS_DEL(tx_handlers_drop_unauth_port);
399 DEBUGFS_STATS_DEL(rx_handlers_drop);
400 DEBUGFS_STATS_DEL(rx_handlers_queued);
401 DEBUGFS_STATS_DEL(rx_handlers_drop_nullfunc);
402 DEBUGFS_STATS_DEL(rx_handlers_drop_defrag);
403 DEBUGFS_STATS_DEL(rx_handlers_drop_short);
404 DEBUGFS_STATS_DEL(rx_handlers_drop_passive_scan);
405 DEBUGFS_STATS_DEL(tx_expand_skb_head);
406 DEBUGFS_STATS_DEL(tx_expand_skb_head_cloned);
407 DEBUGFS_STATS_DEL(rx_expand_skb_head);
408 DEBUGFS_STATS_DEL(rx_expand_skb_head2);
409 DEBUGFS_STATS_DEL(rx_handlers_fragments);
410 DEBUGFS_STATS_DEL(tx_status_drop);
411#endif
412 DEBUGFS_STATS_DEL(dot11ACKFailureCount);
413 DEBUGFS_STATS_DEL(dot11RTSFailureCount);
414 DEBUGFS_STATS_DEL(dot11FCSErrorCount);
415 DEBUGFS_STATS_DEL(dot11RTSSuccessCount);
416
417 debugfs_remove(local->debugfs.statistics);
418 local->debugfs.statistics = NULL;
419 debugfs_remove(local->debugfs.stations);
420 local->debugfs.stations = NULL;
421 debugfs_remove(local->debugfs.keys);
422 local->debugfs.keys = NULL;
423}
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index dd2541935c27..68e6a2050f9a 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -3,14 +3,12 @@
3 3
4#ifdef CONFIG_MAC80211_DEBUGFS 4#ifdef CONFIG_MAC80211_DEBUGFS
5extern void debugfs_hw_add(struct ieee80211_local *local); 5extern void debugfs_hw_add(struct ieee80211_local *local);
6extern void debugfs_hw_del(struct ieee80211_local *local);
7extern int mac80211_open_file_generic(struct inode *inode, struct file *file); 6extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
8#else 7#else
9static inline void debugfs_hw_add(struct ieee80211_local *local) 8static inline void debugfs_hw_add(struct ieee80211_local *local)
10{ 9{
11 return; 10 return;
12} 11}
13static inline void debugfs_hw_del(struct ieee80211_local *local) {}
14#endif 12#endif
15 13
16#endif /* __MAC80211_DEBUGFS_H */ 14#endif /* __MAC80211_DEBUGFS_H */
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 99c752588b30..e0f5224630da 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -225,8 +225,8 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
225KEY_OPS(key); 225KEY_OPS(key);
226 226
227#define DEBUGFS_ADD(name) \ 227#define DEBUGFS_ADD(name) \
228 key->debugfs.name = debugfs_create_file(#name, 0400,\ 228 debugfs_create_file(#name, 0400, key->debugfs.dir, \
229 key->debugfs.dir, key, &key_##name##_ops); 229 key, &key_##name##_ops);
230 230
231void ieee80211_debugfs_key_add(struct ieee80211_key *key) 231void ieee80211_debugfs_key_add(struct ieee80211_key *key)
232 { 232 {
@@ -271,30 +271,12 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
271 DEBUGFS_ADD(ifindex); 271 DEBUGFS_ADD(ifindex);
272}; 272};
273 273
274#define DEBUGFS_DEL(name) \
275 debugfs_remove(key->debugfs.name); key->debugfs.name = NULL;
276
277void ieee80211_debugfs_key_remove(struct ieee80211_key *key) 274void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
278{ 275{
279 if (!key) 276 if (!key)
280 return; 277 return;
281 278
282 DEBUGFS_DEL(keylen); 279 debugfs_remove_recursive(key->debugfs.dir);
283 DEBUGFS_DEL(flags);
284 DEBUGFS_DEL(keyidx);
285 DEBUGFS_DEL(hw_key_idx);
286 DEBUGFS_DEL(tx_rx_count);
287 DEBUGFS_DEL(algorithm);
288 DEBUGFS_DEL(tx_spec);
289 DEBUGFS_DEL(rx_spec);
290 DEBUGFS_DEL(replays);
291 DEBUGFS_DEL(icverrors);
292 DEBUGFS_DEL(key);
293 DEBUGFS_DEL(ifindex);
294
295 debugfs_remove(key->debugfs.stalink);
296 key->debugfs.stalink = NULL;
297 debugfs_remove(key->debugfs.dir);
298 key->debugfs.dir = NULL; 280 key->debugfs.dir = NULL;
299} 281}
300void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) 282void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
@@ -302,7 +284,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
302 char buf[50]; 284 char buf[50];
303 struct ieee80211_key *key; 285 struct ieee80211_key *key;
304 286
305 if (!sdata->debugfsdir) 287 if (!sdata->debugfs.dir)
306 return; 288 return;
307 289
308 /* this is running under the key lock */ 290 /* this is running under the key lock */
@@ -310,9 +292,9 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
310 key = sdata->default_key; 292 key = sdata->default_key;
311 if (key) { 293 if (key) {
312 sprintf(buf, "../keys/%d", key->debugfs.cnt); 294 sprintf(buf, "../keys/%d", key->debugfs.cnt);
313 sdata->common_debugfs.default_key = 295 sdata->debugfs.default_key =
314 debugfs_create_symlink("default_key", 296 debugfs_create_symlink("default_key",
315 sdata->debugfsdir, buf); 297 sdata->debugfs.dir, buf);
316 } else 298 } else
317 ieee80211_debugfs_key_remove_default(sdata); 299 ieee80211_debugfs_key_remove_default(sdata);
318} 300}
@@ -322,8 +304,8 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
322 if (!sdata) 304 if (!sdata)
323 return; 305 return;
324 306
325 debugfs_remove(sdata->common_debugfs.default_key); 307 debugfs_remove(sdata->debugfs.default_key);
326 sdata->common_debugfs.default_key = NULL; 308 sdata->debugfs.default_key = NULL;
327} 309}
328 310
329void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) 311void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
@@ -331,7 +313,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
331 char buf[50]; 313 char buf[50];
332 struct ieee80211_key *key; 314 struct ieee80211_key *key;
333 315
334 if (!sdata->debugfsdir) 316 if (!sdata->debugfs.dir)
335 return; 317 return;
336 318
337 /* this is running under the key lock */ 319 /* this is running under the key lock */
@@ -339,9 +321,9 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
339 key = sdata->default_mgmt_key; 321 key = sdata->default_mgmt_key;
340 if (key) { 322 if (key) {
341 sprintf(buf, "../keys/%d", key->debugfs.cnt); 323 sprintf(buf, "../keys/%d", key->debugfs.cnt);
342 sdata->common_debugfs.default_mgmt_key = 324 sdata->debugfs.default_mgmt_key =
343 debugfs_create_symlink("default_mgmt_key", 325 debugfs_create_symlink("default_mgmt_key",
344 sdata->debugfsdir, buf); 326 sdata->debugfs.dir, buf);
345 } else 327 } else
346 ieee80211_debugfs_key_remove_mgmt_default(sdata); 328 ieee80211_debugfs_key_remove_mgmt_default(sdata);
347} 329}
@@ -351,8 +333,8 @@ void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sda
351 if (!sdata) 333 if (!sdata)
352 return; 334 return;
353 335
354 debugfs_remove(sdata->common_debugfs.default_mgmt_key); 336 debugfs_remove(sdata->debugfs.default_mgmt_key);
355 sdata->common_debugfs.default_mgmt_key = NULL; 337 sdata->debugfs.default_mgmt_key = NULL;
356} 338}
357 339
358void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 340void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 61234e79022b..472b2039906c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -149,12 +149,14 @@ IEEE80211_IF_FILE(path_refresh_time,
149 u.mesh.mshcfg.path_refresh_time, DEC); 149 u.mesh.mshcfg.path_refresh_time, DEC);
150IEEE80211_IF_FILE(min_discovery_timeout, 150IEEE80211_IF_FILE(min_discovery_timeout,
151 u.mesh.mshcfg.min_discovery_timeout, DEC); 151 u.mesh.mshcfg.min_discovery_timeout, DEC);
152IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
153 u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
152#endif 154#endif
153 155
154 156
155#define DEBUGFS_ADD(name, type)\ 157#define DEBUGFS_ADD(name, type) \
156 sdata->debugfs.type.name = debugfs_create_file(#name, 0400,\ 158 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
157 sdata->debugfsdir, sdata, &name##_ops); 159 sdata, &name##_ops);
158 160
159static void add_sta_files(struct ieee80211_sub_if_data *sdata) 161static void add_sta_files(struct ieee80211_sub_if_data *sdata)
160{ 162{
@@ -199,30 +201,32 @@ static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
199} 201}
200 202
201#ifdef CONFIG_MAC80211_MESH 203#ifdef CONFIG_MAC80211_MESH
202#define MESHSTATS_ADD(name)\
203 sdata->mesh_stats.name = debugfs_create_file(#name, 0400,\
204 sdata->mesh_stats_dir, sdata, &name##_ops);
205 204
206static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) 205static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
207{ 206{
208 sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", 207 struct dentry *dir = debugfs_create_dir("mesh_stats",
209 sdata->debugfsdir); 208 sdata->debugfs.dir);
209
210#define MESHSTATS_ADD(name)\
211 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
212
210 MESHSTATS_ADD(fwded_mcast); 213 MESHSTATS_ADD(fwded_mcast);
211 MESHSTATS_ADD(fwded_unicast); 214 MESHSTATS_ADD(fwded_unicast);
212 MESHSTATS_ADD(fwded_frames); 215 MESHSTATS_ADD(fwded_frames);
213 MESHSTATS_ADD(dropped_frames_ttl); 216 MESHSTATS_ADD(dropped_frames_ttl);
214 MESHSTATS_ADD(dropped_frames_no_route); 217 MESHSTATS_ADD(dropped_frames_no_route);
215 MESHSTATS_ADD(estab_plinks); 218 MESHSTATS_ADD(estab_plinks);
219#undef MESHSTATS_ADD
216} 220}
217 221
218#define MESHPARAMS_ADD(name)\
219 sdata->mesh_config.name = debugfs_create_file(#name, 0600,\
220 sdata->mesh_config_dir, sdata, &name##_ops);
221
222static void add_mesh_config(struct ieee80211_sub_if_data *sdata) 222static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
223{ 223{
224 sdata->mesh_config_dir = debugfs_create_dir("mesh_config", 224 struct dentry *dir = debugfs_create_dir("mesh_config",
225 sdata->debugfsdir); 225 sdata->debugfs.dir);
226
227#define MESHPARAMS_ADD(name) \
228 debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
229
226 MESHPARAMS_ADD(dot11MeshMaxRetries); 230 MESHPARAMS_ADD(dot11MeshMaxRetries);
227 MESHPARAMS_ADD(dot11MeshRetryTimeout); 231 MESHPARAMS_ADD(dot11MeshRetryTimeout);
228 MESHPARAMS_ADD(dot11MeshConfirmTimeout); 232 MESHPARAMS_ADD(dot11MeshConfirmTimeout);
@@ -236,12 +240,14 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
236 MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); 240 MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
237 MESHPARAMS_ADD(path_refresh_time); 241 MESHPARAMS_ADD(path_refresh_time);
238 MESHPARAMS_ADD(min_discovery_timeout); 242 MESHPARAMS_ADD(min_discovery_timeout);
243
244#undef MESHPARAMS_ADD
239} 245}
240#endif 246#endif
241 247
242static void add_files(struct ieee80211_sub_if_data *sdata) 248static void add_files(struct ieee80211_sub_if_data *sdata)
243{ 249{
244 if (!sdata->debugfsdir) 250 if (!sdata->debugfs.dir)
245 return; 251 return;
246 252
247 switch (sdata->vif.type) { 253 switch (sdata->vif.type) {
@@ -274,134 +280,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
274 } 280 }
275} 281}
276 282
277#define DEBUGFS_DEL(name, type) \
278 do { \
279 debugfs_remove(sdata->debugfs.type.name); \
280 sdata->debugfs.type.name = NULL; \
281 } while (0)
282
283static void del_sta_files(struct ieee80211_sub_if_data *sdata)
284{
285 DEBUGFS_DEL(drop_unencrypted, sta);
286 DEBUGFS_DEL(force_unicast_rateidx, sta);
287 DEBUGFS_DEL(max_ratectrl_rateidx, sta);
288
289 DEBUGFS_DEL(bssid, sta);
290 DEBUGFS_DEL(aid, sta);
291 DEBUGFS_DEL(capab, sta);
292}
293
294static void del_ap_files(struct ieee80211_sub_if_data *sdata)
295{
296 DEBUGFS_DEL(drop_unencrypted, ap);
297 DEBUGFS_DEL(force_unicast_rateidx, ap);
298 DEBUGFS_DEL(max_ratectrl_rateidx, ap);
299
300 DEBUGFS_DEL(num_sta_ps, ap);
301 DEBUGFS_DEL(dtim_count, ap);
302 DEBUGFS_DEL(num_buffered_multicast, ap);
303}
304
305static void del_wds_files(struct ieee80211_sub_if_data *sdata)
306{
307 DEBUGFS_DEL(drop_unencrypted, wds);
308 DEBUGFS_DEL(force_unicast_rateidx, wds);
309 DEBUGFS_DEL(max_ratectrl_rateidx, wds);
310
311 DEBUGFS_DEL(peer, wds);
312}
313
314static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
315{
316 DEBUGFS_DEL(drop_unencrypted, vlan);
317 DEBUGFS_DEL(force_unicast_rateidx, vlan);
318 DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
319}
320
321static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
322{
323}
324
325#ifdef CONFIG_MAC80211_MESH
326#define MESHSTATS_DEL(name) \
327 do { \
328 debugfs_remove(sdata->mesh_stats.name); \
329 sdata->mesh_stats.name = NULL; \
330 } while (0)
331
332static void del_mesh_stats(struct ieee80211_sub_if_data *sdata)
333{
334 MESHSTATS_DEL(fwded_mcast);
335 MESHSTATS_DEL(fwded_unicast);
336 MESHSTATS_DEL(fwded_frames);
337 MESHSTATS_DEL(dropped_frames_ttl);
338 MESHSTATS_DEL(dropped_frames_no_route);
339 MESHSTATS_DEL(estab_plinks);
340 debugfs_remove(sdata->mesh_stats_dir);
341 sdata->mesh_stats_dir = NULL;
342}
343
344#define MESHPARAMS_DEL(name) \
345 do { \
346 debugfs_remove(sdata->mesh_config.name); \
347 sdata->mesh_config.name = NULL; \
348 } while (0)
349
350static void del_mesh_config(struct ieee80211_sub_if_data *sdata)
351{
352 MESHPARAMS_DEL(dot11MeshMaxRetries);
353 MESHPARAMS_DEL(dot11MeshRetryTimeout);
354 MESHPARAMS_DEL(dot11MeshConfirmTimeout);
355 MESHPARAMS_DEL(dot11MeshHoldingTimeout);
356 MESHPARAMS_DEL(dot11MeshTTL);
357 MESHPARAMS_DEL(auto_open_plinks);
358 MESHPARAMS_DEL(dot11MeshMaxPeerLinks);
359 MESHPARAMS_DEL(dot11MeshHWMPactivePathTimeout);
360 MESHPARAMS_DEL(dot11MeshHWMPpreqMinInterval);
361 MESHPARAMS_DEL(dot11MeshHWMPnetDiameterTraversalTime);
362 MESHPARAMS_DEL(dot11MeshHWMPmaxPREQretries);
363 MESHPARAMS_DEL(path_refresh_time);
364 MESHPARAMS_DEL(min_discovery_timeout);
365 debugfs_remove(sdata->mesh_config_dir);
366 sdata->mesh_config_dir = NULL;
367}
368#endif
369
370static void del_files(struct ieee80211_sub_if_data *sdata)
371{
372 if (!sdata->debugfsdir)
373 return;
374
375 switch (sdata->vif.type) {
376 case NL80211_IFTYPE_MESH_POINT:
377#ifdef CONFIG_MAC80211_MESH
378 del_mesh_stats(sdata);
379 del_mesh_config(sdata);
380#endif
381 break;
382 case NL80211_IFTYPE_STATION:
383 del_sta_files(sdata);
384 break;
385 case NL80211_IFTYPE_ADHOC:
386 /* XXX */
387 break;
388 case NL80211_IFTYPE_AP:
389 del_ap_files(sdata);
390 break;
391 case NL80211_IFTYPE_WDS:
392 del_wds_files(sdata);
393 break;
394 case NL80211_IFTYPE_MONITOR:
395 del_monitor_files(sdata);
396 break;
397 case NL80211_IFTYPE_AP_VLAN:
398 del_vlan_files(sdata);
399 break;
400 default:
401 break;
402 }
403}
404
405static int notif_registered; 283static int notif_registered;
406 284
407void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) 285void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
@@ -412,16 +290,18 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
412 return; 290 return;
413 291
414 sprintf(buf, "netdev:%s", sdata->dev->name); 292 sprintf(buf, "netdev:%s", sdata->dev->name);
415 sdata->debugfsdir = debugfs_create_dir(buf, 293 sdata->debugfs.dir = debugfs_create_dir(buf,
416 sdata->local->hw.wiphy->debugfsdir); 294 sdata->local->hw.wiphy->debugfsdir);
417 add_files(sdata); 295 add_files(sdata);
418} 296}
419 297
420void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) 298void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
421{ 299{
422 del_files(sdata); 300 if (!sdata->debugfs.dir)
423 debugfs_remove(sdata->debugfsdir); 301 return;
424 sdata->debugfsdir = NULL; 302
303 debugfs_remove_recursive(sdata->debugfs.dir);
304 sdata->debugfs.dir = NULL;
425} 305}
426 306
427static int netdev_notify(struct notifier_block *nb, 307static int netdev_notify(struct notifier_block *nb,
@@ -444,7 +324,7 @@ static int netdev_notify(struct notifier_block *nb,
444 324
445 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 325 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
446 326
447 dir = sdata->debugfsdir; 327 dir = sdata->debugfs.dir;
448 328
449 if (!dir) 329 if (!dir)
450 return 0; 330 return 0;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 33a2e892115b..3f41608c8081 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
57STA_FILE(tx_retry_failed, tx_retry_failed, LU); 57STA_FILE(tx_retry_failed, tx_retry_failed, LU);
58STA_FILE(tx_retry_count, tx_retry_count, LU); 58STA_FILE(tx_retry_count, tx_retry_count, LU);
59STA_FILE(last_signal, last_signal, D); 59STA_FILE(last_signal, last_signal, D);
60STA_FILE(last_qual, last_qual, D);
61STA_FILE(last_noise, last_noise, D); 60STA_FILE(last_noise, last_noise, D);
62STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); 61STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
63 62
@@ -67,10 +66,11 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
67 char buf[100]; 66 char buf[100];
68 struct sta_info *sta = file->private_data; 67 struct sta_info *sta = file->private_data;
69 u32 staflags = get_sta_flags(sta); 68 u32 staflags = get_sta_flags(sta);
70 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s", 69 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
71 staflags & WLAN_STA_AUTH ? "AUTH\n" : "", 70 staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
72 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 71 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
73 staflags & WLAN_STA_PS ? "PS\n" : "", 72 staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
73 staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
74 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 74 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
75 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 75 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
76 staflags & WLAN_STA_WME ? "WME\n" : "", 76 staflags & WLAN_STA_WME ? "WME\n" : "",
@@ -157,14 +157,38 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
157} 157}
158STA_OPS(agg_status); 158STA_OPS(agg_status);
159 159
160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
161 size_t count, loff_t *ppos)
162{
163 char buf[200], *p = buf;
164 int i;
165 struct sta_info *sta = file->private_data;
166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
167
168 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
169 htc->ht_supported ? "" : "not ");
170 if (htc->ht_supported) {
171 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap);
172 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
173 htc->ampdu_factor, htc->ampdu_density);
174 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
175 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
177 htc->mcs.rx_mask[i]);
178 p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n",
179 le16_to_cpu(htc->mcs.rx_highest));
180 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
181 htc->mcs.tx_params);
182 }
183
184 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
185}
186STA_OPS(ht_capa);
187
160#define DEBUGFS_ADD(name) \ 188#define DEBUGFS_ADD(name) \
161 sta->debugfs.name = debugfs_create_file(#name, 0400, \ 189 debugfs_create_file(#name, 0400, \
162 sta->debugfs.dir, sta, &sta_ ##name## _ops); 190 sta->debugfs.dir, sta, &sta_ ##name## _ops);
163 191
164#define DEBUGFS_DEL(name) \
165 debugfs_remove(sta->debugfs.name);\
166 sta->debugfs.name = NULL;
167
168 192
169void ieee80211_sta_debugfs_add(struct sta_info *sta) 193void ieee80211_sta_debugfs_add(struct sta_info *sta)
170{ 194{
@@ -209,36 +233,13 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
209 DEBUGFS_ADD(tx_retry_failed); 233 DEBUGFS_ADD(tx_retry_failed);
210 DEBUGFS_ADD(tx_retry_count); 234 DEBUGFS_ADD(tx_retry_count);
211 DEBUGFS_ADD(last_signal); 235 DEBUGFS_ADD(last_signal);
212 DEBUGFS_ADD(last_qual);
213 DEBUGFS_ADD(last_noise); 236 DEBUGFS_ADD(last_noise);
214 DEBUGFS_ADD(wep_weak_iv_count); 237 DEBUGFS_ADD(wep_weak_iv_count);
238 DEBUGFS_ADD(ht_capa);
215} 239}
216 240
217void ieee80211_sta_debugfs_remove(struct sta_info *sta) 241void ieee80211_sta_debugfs_remove(struct sta_info *sta)
218{ 242{
219 DEBUGFS_DEL(flags); 243 debugfs_remove_recursive(sta->debugfs.dir);
220 DEBUGFS_DEL(num_ps_buf_frames);
221 DEBUGFS_DEL(inactive_ms);
222 DEBUGFS_DEL(last_seq_ctrl);
223 DEBUGFS_DEL(agg_status);
224 DEBUGFS_DEL(aid);
225 DEBUGFS_DEL(dev);
226 DEBUGFS_DEL(rx_packets);
227 DEBUGFS_DEL(tx_packets);
228 DEBUGFS_DEL(rx_bytes);
229 DEBUGFS_DEL(tx_bytes);
230 DEBUGFS_DEL(rx_duplicates);
231 DEBUGFS_DEL(rx_fragments);
232 DEBUGFS_DEL(rx_dropped);
233 DEBUGFS_DEL(tx_fragments);
234 DEBUGFS_DEL(tx_filtered);
235 DEBUGFS_DEL(tx_retry_failed);
236 DEBUGFS_DEL(tx_retry_count);
237 DEBUGFS_DEL(last_signal);
238 DEBUGFS_DEL(last_qual);
239 DEBUGFS_DEL(last_noise);
240 DEBUGFS_DEL(wep_weak_iv_count);
241
242 debugfs_remove(sta->debugfs.dir);
243 sta->debugfs.dir = NULL; 244 sta->debugfs.dir = NULL;
244} 245}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 020a94a31106..921dd9c9ff62 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -239,15 +239,16 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
239} 239}
240 240
241static inline int drv_ampdu_action(struct ieee80211_local *local, 241static inline int drv_ampdu_action(struct ieee80211_local *local,
242 struct ieee80211_vif *vif,
242 enum ieee80211_ampdu_mlme_action action, 243 enum ieee80211_ampdu_mlme_action action,
243 struct ieee80211_sta *sta, u16 tid, 244 struct ieee80211_sta *sta, u16 tid,
244 u16 *ssn) 245 u16 *ssn)
245{ 246{
246 int ret = -EOPNOTSUPP; 247 int ret = -EOPNOTSUPP;
247 if (local->ops->ampdu_action) 248 if (local->ops->ampdu_action)
248 ret = local->ops->ampdu_action(&local->hw, action, 249 ret = local->ops->ampdu_action(&local->hw, vif, action,
249 sta, tid, ssn); 250 sta, tid, ssn);
250 trace_drv_ampdu_action(local, action, sta, tid, ssn, ret); 251 trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret);
251 return ret; 252 return ret;
252} 253}
253 254
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 37b9051afcf3..b8fef1d11369 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -634,11 +634,12 @@ TRACE_EVENT(drv_tx_last_beacon,
634 634
635TRACE_EVENT(drv_ampdu_action, 635TRACE_EVENT(drv_ampdu_action,
636 TP_PROTO(struct ieee80211_local *local, 636 TP_PROTO(struct ieee80211_local *local,
637 struct ieee80211_vif *vif,
637 enum ieee80211_ampdu_mlme_action action, 638 enum ieee80211_ampdu_mlme_action action,
638 struct ieee80211_sta *sta, u16 tid, 639 struct ieee80211_sta *sta, u16 tid,
639 u16 *ssn, int ret), 640 u16 *ssn, int ret),
640 641
641 TP_ARGS(local, action, sta, tid, ssn, ret), 642 TP_ARGS(local, vif, action, sta, tid, ssn, ret),
642 643
643 TP_STRUCT__entry( 644 TP_STRUCT__entry(
644 LOCAL_ENTRY 645 LOCAL_ENTRY
@@ -647,10 +648,12 @@ TRACE_EVENT(drv_ampdu_action,
647 __field(u16, tid) 648 __field(u16, tid)
648 __field(u16, ssn) 649 __field(u16, ssn)
649 __field(int, ret) 650 __field(int, ret)
651 VIF_ENTRY
650 ), 652 ),
651 653
652 TP_fast_assign( 654 TP_fast_assign(
653 LOCAL_ASSIGN; 655 LOCAL_ASSIGN;
656 VIF_ASSIGN;
654 STA_ASSIGN; 657 STA_ASSIGN;
655 __entry->ret = ret; 658 __entry->ret = ret;
656 __entry->action = action; 659 __entry->action = action;
@@ -659,8 +662,8 @@ TRACE_EVENT(drv_ampdu_action,
659 ), 662 ),
660 663
661 TP_printk( 664 TP_printk(
662 LOCAL_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d", 665 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
663 LOCAL_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret 666 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
664 ) 667 )
665); 668);
666#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 669#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 0891bfb06996..3787455fb696 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -134,14 +134,13 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
134 mgmt->u.action.u.delba.params = cpu_to_le16(params); 134 mgmt->u.action.u.delba.params = cpu_to_le16(params);
135 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); 135 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
136 136
137 ieee80211_tx_skb(sdata, skb, 1); 137 ieee80211_tx_skb(sdata, skb);
138} 138}
139 139
140void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, 140void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
141 struct sta_info *sta, 141 struct sta_info *sta,
142 struct ieee80211_mgmt *mgmt, size_t len) 142 struct ieee80211_mgmt *mgmt, size_t len)
143{ 143{
144 struct ieee80211_local *local = sdata->local;
145 u16 tid, params; 144 u16 tid, params;
146 u16 initiator; 145 u16 initiator;
147 146
@@ -153,7 +152,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
153 if (net_ratelimit()) 152 if (net_ratelimit())
154 printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", 153 printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
155 mgmt->sa, initiator ? "initiator" : "recipient", tid, 154 mgmt->sa, initiator ? "initiator" : "recipient", tid,
156 mgmt->u.action.u.delba.reason_code); 155 le16_to_cpu(mgmt->u.action.u.delba.reason_code));
157#endif /* CONFIG_MAC80211_HT_DEBUG */ 156#endif /* CONFIG_MAC80211_HT_DEBUG */
158 157
159 if (initiator == WLAN_BACK_INITIATOR) 158 if (initiator == WLAN_BACK_INITIATOR)
@@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
161 WLAN_BACK_INITIATOR, 0); 160 WLAN_BACK_INITIATOR, 0);
162 else { /* WLAN_BACK_RECIPIENT */ 161 else { /* WLAN_BACK_RECIPIENT */
163 spin_lock_bh(&sta->lock); 162 spin_lock_bh(&sta->lock);
164 sta->ampdu_mlme.tid_state_tx[tid] = 163 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
165 HT_AGG_STATE_OPERATIONAL; 164 ___ieee80211_stop_tx_ba_session(sta, tid,
165 WLAN_BACK_RECIPIENT);
166 spin_unlock_bh(&sta->lock); 166 spin_unlock_bh(&sta->lock);
167 ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
168 WLAN_BACK_RECIPIENT);
169 } 167 }
170} 168}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6eaf69823439..10d13856f86c 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -73,6 +73,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
73 struct ieee80211_mgmt *mgmt; 73 struct ieee80211_mgmt *mgmt;
74 u8 *pos; 74 u8 *pos;
75 struct ieee80211_supported_band *sband; 75 struct ieee80211_supported_band *sband;
76 struct cfg80211_bss *bss;
76 u32 bss_change; 77 u32 bss_change;
77 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 78 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
78 79
@@ -177,8 +178,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
177 mod_timer(&ifibss->timer, 178 mod_timer(&ifibss->timer,
178 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); 179 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
179 180
180 cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel, 181 bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
181 mgmt, skb->len, 0, GFP_KERNEL); 182 mgmt, skb->len, 0, GFP_KERNEL);
183 cfg80211_put_bss(bss);
182 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); 184 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
183} 185}
184 186
@@ -453,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
453 455
454 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); 456 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
455 457
458 if (time_before(jiffies, ifibss->last_scan_completed +
459 IEEE80211_IBSS_MERGE_INTERVAL))
460 return;
461
456 if (ieee80211_sta_active_ibss(sdata)) 462 if (ieee80211_sta_active_ibss(sdata))
457 return; 463 return;
458 464
@@ -538,13 +544,12 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
538 WLAN_CAPABILITY_PRIVACY, 544 WLAN_CAPABILITY_PRIVACY,
539 capability); 545 capability);
540 546
547 if (bss) {
541#ifdef CONFIG_MAC80211_IBSS_DEBUG 548#ifdef CONFIG_MAC80211_IBSS_DEBUG
542 if (bss)
543 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 549 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
544 "%pM\n", bss->cbss.bssid, ifibss->bssid); 550 "%pM\n", bss->cbss.bssid, ifibss->bssid);
545#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 551#endif /* CONFIG_MAC80211_IBSS_DEBUG */
546 552
547 if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
548 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 553 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
549 " based on configured SSID\n", 554 " based on configured SSID\n",
550 sdata->dev->name, bss->cbss.bssid); 555 sdata->dev->name, bss->cbss.bssid);
@@ -552,8 +557,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
552 ieee80211_sta_join_ibss(sdata, bss); 557 ieee80211_sta_join_ibss(sdata, bss);
553 ieee80211_rx_bss_put(local, bss); 558 ieee80211_rx_bss_put(local, bss);
554 return; 559 return;
555 } else if (bss) 560 }
556 ieee80211_rx_bss_put(local, bss);
557 561
558#ifdef CONFIG_MAC80211_IBSS_DEBUG 562#ifdef CONFIG_MAC80211_IBSS_DEBUG
559 printk(KERN_DEBUG " did not try to join ibss\n"); 563 printk(KERN_DEBUG " did not try to join ibss\n");
@@ -655,7 +659,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
655 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", 659 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
656 sdata->dev->name, resp->da); 660 sdata->dev->name, resp->da);
657#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 661#endif /* CONFIG_MAC80211_IBSS_DEBUG */
658 ieee80211_tx_skb(sdata, skb, 0); 662 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
663 ieee80211_tx_skb(sdata, skb);
659} 664}
660 665
661static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 666static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 588005c84a6d..039affa7c871 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -23,6 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
26#include <net/ieee80211_radiotap.h>
26#include <net/cfg80211.h> 27#include <net/cfg80211.h>
27#include <net/mac80211.h> 28#include <net/mac80211.h>
28#include "key.h" 29#include "key.h"
@@ -167,16 +168,12 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
167 168
168struct ieee80211_rx_data { 169struct ieee80211_rx_data {
169 struct sk_buff *skb; 170 struct sk_buff *skb;
170 struct net_device *dev;
171 struct ieee80211_local *local; 171 struct ieee80211_local *local;
172 struct ieee80211_sub_if_data *sdata; 172 struct ieee80211_sub_if_data *sdata;
173 struct sta_info *sta; 173 struct sta_info *sta;
174 struct ieee80211_key *key; 174 struct ieee80211_key *key;
175 struct ieee80211_rx_status *status;
176 struct ieee80211_rate *rate;
177 175
178 unsigned int flags; 176 unsigned int flags;
179 int sent_ps_buffered;
180 int queue; 177 int queue;
181 u32 tkip_iv32; 178 u32 tkip_iv32;
182 u16 tkip_iv16; 179 u16 tkip_iv16;
@@ -209,6 +206,9 @@ struct ieee80211_if_wds {
209 206
210struct ieee80211_if_vlan { 207struct ieee80211_if_vlan {
211 struct list_head list; 208 struct list_head list;
209
210 /* used for all tx if the VLAN is configured to 4-addr mode */
211 struct sta_info *sta;
212}; 212};
213 213
214struct mesh_stats { 214struct mesh_stats {
@@ -312,6 +312,8 @@ struct ieee80211_if_managed {
312 } mfp; /* management frame protection */ 312 } mfp; /* management frame protection */
313 313
314 int wmm_last_param_set; 314 int wmm_last_param_set;
315
316 u8 use_4addr;
315}; 317};
316 318
317enum ieee80211_ibss_request { 319enum ieee80211_ibss_request {
@@ -353,6 +355,7 @@ struct ieee80211_if_mesh {
353 struct work_struct work; 355 struct work_struct work;
354 struct timer_list housekeeping_timer; 356 struct timer_list housekeeping_timer;
355 struct timer_list mesh_path_timer; 357 struct timer_list mesh_path_timer;
358 struct timer_list mesh_path_root_timer;
356 struct sk_buff_head skb_queue; 359 struct sk_buff_head skb_queue;
357 360
358 unsigned long timers_running; 361 unsigned long timers_running;
@@ -362,23 +365,23 @@ struct ieee80211_if_mesh {
362 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 365 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
363 size_t mesh_id_len; 366 size_t mesh_id_len;
364 /* Active Path Selection Protocol Identifier */ 367 /* Active Path Selection Protocol Identifier */
365 u8 mesh_pp_id[4]; 368 u8 mesh_pp_id;
366 /* Active Path Selection Metric Identifier */ 369 /* Active Path Selection Metric Identifier */
367 u8 mesh_pm_id[4]; 370 u8 mesh_pm_id;
368 /* Congestion Control Mode Identifier */ 371 /* Congestion Control Mode Identifier */
369 u8 mesh_cc_id[4]; 372 u8 mesh_cc_id;
370 /* Synchronization Protocol Identifier */ 373 /* Synchronization Protocol Identifier */
371 u8 mesh_sp_id[4]; 374 u8 mesh_sp_id;
372 /* Authentication Protocol Identifier */ 375 /* Authentication Protocol Identifier */
373 u8 mesh_auth_id[4]; 376 u8 mesh_auth_id;
374 /* Local mesh Destination Sequence Number */ 377 /* Local mesh Sequence Number */
375 u32 dsn; 378 u32 sn;
376 /* Last used PREQ ID */ 379 /* Last used PREQ ID */
377 u32 preq_id; 380 u32 preq_id;
378 atomic_t mpaths; 381 atomic_t mpaths;
379 /* Timestamp of last DSN update */ 382 /* Timestamp of last SN update */
380 unsigned long last_dsn_update; 383 unsigned long last_sn_update;
381 /* Timestamp of last DSN sent */ 384 /* Timestamp of last SN sent */
382 unsigned long last_preq; 385 unsigned long last_preq;
383 struct mesh_rmc *rmc; 386 struct mesh_rmc *rmc;
384 spinlock_t mesh_preq_queue_lock; 387 spinlock_t mesh_preq_queue_lock;
@@ -471,74 +474,11 @@ struct ieee80211_sub_if_data {
471 } u; 474 } u;
472 475
473#ifdef CONFIG_MAC80211_DEBUGFS 476#ifdef CONFIG_MAC80211_DEBUGFS
474 struct dentry *debugfsdir;
475 union {
476 struct {
477 struct dentry *drop_unencrypted;
478 struct dentry *bssid;
479 struct dentry *aid;
480 struct dentry *capab;
481 struct dentry *force_unicast_rateidx;
482 struct dentry *max_ratectrl_rateidx;
483 } sta;
484 struct {
485 struct dentry *drop_unencrypted;
486 struct dentry *num_sta_ps;
487 struct dentry *dtim_count;
488 struct dentry *force_unicast_rateidx;
489 struct dentry *max_ratectrl_rateidx;
490 struct dentry *num_buffered_multicast;
491 } ap;
492 struct {
493 struct dentry *drop_unencrypted;
494 struct dentry *peer;
495 struct dentry *force_unicast_rateidx;
496 struct dentry *max_ratectrl_rateidx;
497 } wds;
498 struct {
499 struct dentry *drop_unencrypted;
500 struct dentry *force_unicast_rateidx;
501 struct dentry *max_ratectrl_rateidx;
502 } vlan;
503 struct {
504 struct dentry *mode;
505 } monitor;
506 } debugfs;
507 struct { 477 struct {
478 struct dentry *dir;
508 struct dentry *default_key; 479 struct dentry *default_key;
509 struct dentry *default_mgmt_key; 480 struct dentry *default_mgmt_key;
510 } common_debugfs; 481 } debugfs;
511
512#ifdef CONFIG_MAC80211_MESH
513 struct dentry *mesh_stats_dir;
514 struct {
515 struct dentry *fwded_mcast;
516 struct dentry *fwded_unicast;
517 struct dentry *fwded_frames;
518 struct dentry *dropped_frames_ttl;
519 struct dentry *dropped_frames_no_route;
520 struct dentry *estab_plinks;
521 struct timer_list mesh_path_timer;
522 } mesh_stats;
523
524 struct dentry *mesh_config_dir;
525 struct {
526 struct dentry *dot11MeshRetryTimeout;
527 struct dentry *dot11MeshConfirmTimeout;
528 struct dentry *dot11MeshHoldingTimeout;
529 struct dentry *dot11MeshMaxRetries;
530 struct dentry *dot11MeshTTL;
531 struct dentry *auto_open_plinks;
532 struct dentry *dot11MeshMaxPeerLinks;
533 struct dentry *dot11MeshHWMPactivePathTimeout;
534 struct dentry *dot11MeshHWMPpreqMinInterval;
535 struct dentry *dot11MeshHWMPnetDiameterTraversalTime;
536 struct dentry *dot11MeshHWMPmaxPREQretries;
537 struct dentry *path_refresh_time;
538 struct dentry *min_discovery_timeout;
539 } mesh_config;
540#endif
541
542#endif 482#endif
543 /* must be last, dynamically sized area in this! */ 483 /* must be last, dynamically sized area in this! */
544 struct ieee80211_vif vif; 484 struct ieee80211_vif vif;
@@ -639,7 +579,6 @@ struct ieee80211_local {
639 /* number of interfaces with corresponding FIF_ flags */ 579 /* number of interfaces with corresponding FIF_ flags */
640 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; 580 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
641 unsigned int filter_flags; /* FIF_* */ 581 unsigned int filter_flags; /* FIF_* */
642 struct iw_statistics wstats;
643 582
644 /* protects the aggregated multicast list and filter calls */ 583 /* protects the aggregated multicast list and filter calls */
645 spinlock_t filter_lock; 584 spinlock_t filter_lock;
@@ -662,6 +601,14 @@ struct ieee80211_local {
662 bool suspended; 601 bool suspended;
663 602
664 /* 603 /*
604 * Resuming is true while suspended, but when we're reprogramming the
605 * hardware -- at that time it's allowed to use ieee80211_queue_work()
606 * again even though some other parts of the stack are still suspended
607 * and we still drop received frames to avoid waking the stack.
608 */
609 bool resuming;
610
611 /*
665 * quiescing is true during the suspend process _only_ to 612 * quiescing is true during the suspend process _only_ to
666 * ease timer cancelling etc. 613 * ease timer cancelling etc.
667 */ 614 */
@@ -730,10 +677,9 @@ struct ieee80211_local {
730 unsigned long scanning; 677 unsigned long scanning;
731 struct cfg80211_ssid scan_ssid; 678 struct cfg80211_ssid scan_ssid;
732 struct cfg80211_scan_request *int_scan_req; 679 struct cfg80211_scan_request *int_scan_req;
733 struct cfg80211_scan_request *scan_req; 680 struct cfg80211_scan_request *scan_req, *hw_scan_req;
734 struct ieee80211_channel *scan_channel; 681 struct ieee80211_channel *scan_channel;
735 const u8 *orig_ies; 682 enum ieee80211_band hw_scan_band;
736 int orig_ies_len;
737 int scan_channel_idx; 683 int scan_channel_idx;
738 int scan_ies_len; 684 int scan_ies_len;
739 685
@@ -818,53 +764,6 @@ struct ieee80211_local {
818#ifdef CONFIG_MAC80211_DEBUGFS 764#ifdef CONFIG_MAC80211_DEBUGFS
819 struct local_debugfsdentries { 765 struct local_debugfsdentries {
820 struct dentry *rcdir; 766 struct dentry *rcdir;
821 struct dentry *rcname;
822 struct dentry *frequency;
823 struct dentry *total_ps_buffered;
824 struct dentry *wep_iv;
825 struct dentry *tsf;
826 struct dentry *queues;
827 struct dentry *reset;
828 struct dentry *noack;
829 struct dentry *statistics;
830 struct local_debugfsdentries_statsdentries {
831 struct dentry *transmitted_fragment_count;
832 struct dentry *multicast_transmitted_frame_count;
833 struct dentry *failed_count;
834 struct dentry *retry_count;
835 struct dentry *multiple_retry_count;
836 struct dentry *frame_duplicate_count;
837 struct dentry *received_fragment_count;
838 struct dentry *multicast_received_frame_count;
839 struct dentry *transmitted_frame_count;
840 struct dentry *wep_undecryptable_count;
841 struct dentry *num_scans;
842#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
843 struct dentry *tx_handlers_drop;
844 struct dentry *tx_handlers_queued;
845 struct dentry *tx_handlers_drop_unencrypted;
846 struct dentry *tx_handlers_drop_fragment;
847 struct dentry *tx_handlers_drop_wep;
848 struct dentry *tx_handlers_drop_not_assoc;
849 struct dentry *tx_handlers_drop_unauth_port;
850 struct dentry *rx_handlers_drop;
851 struct dentry *rx_handlers_queued;
852 struct dentry *rx_handlers_drop_nullfunc;
853 struct dentry *rx_handlers_drop_defrag;
854 struct dentry *rx_handlers_drop_short;
855 struct dentry *rx_handlers_drop_passive_scan;
856 struct dentry *tx_expand_skb_head;
857 struct dentry *tx_expand_skb_head_cloned;
858 struct dentry *rx_expand_skb_head;
859 struct dentry *rx_expand_skb_head2;
860 struct dentry *rx_handlers_fragments;
861 struct dentry *tx_status_drop;
862#endif
863 struct dentry *dot11ACKFailureCount;
864 struct dentry *dot11RTSFailureCount;
865 struct dentry *dot11FCSErrorCount;
866 struct dentry *dot11RTSSuccessCount;
867 } stats;
868 struct dentry *stations; 767 struct dentry *stations;
869 struct dentry *keys; 768 struct dentry *keys;
870 } debugfs; 769 } debugfs;
@@ -877,8 +776,9 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
877 return netdev_priv(dev); 776 return netdev_priv(dev);
878} 777}
879 778
880/* this struct represents 802.11n's RA/TID combination */ 779/* this struct represents 802.11n's RA/TID combination along with our vif */
881struct ieee80211_ra_tid { 780struct ieee80211_ra_tid {
781 struct ieee80211_vif *vif;
882 u8 ra[ETH_ALEN]; 782 u8 ra[ETH_ALEN];
883 u16 tid; 783 u16 tid;
884}; 784};
@@ -905,12 +805,13 @@ struct ieee802_11_elems {
905 u8 *wmm_param; 805 u8 *wmm_param;
906 struct ieee80211_ht_cap *ht_cap_elem; 806 struct ieee80211_ht_cap *ht_cap_elem;
907 struct ieee80211_ht_info *ht_info_elem; 807 struct ieee80211_ht_info *ht_info_elem;
908 u8 *mesh_config; 808 struct ieee80211_meshconf_ie *mesh_config;
909 u8 *mesh_id; 809 u8 *mesh_id;
910 u8 *peer_link; 810 u8 *peer_link;
911 u8 *preq; 811 u8 *preq;
912 u8 *prep; 812 u8 *prep;
913 u8 *perr; 813 u8 *perr;
814 struct ieee80211_rann_ie *rann;
914 u8 *ch_switch_elem; 815 u8 *ch_switch_elem;
915 u8 *country_elem; 816 u8 *country_elem;
916 u8 *pwr_constr_elem; 817 u8 *pwr_constr_elem;
@@ -932,7 +833,6 @@ struct ieee802_11_elems {
932 u8 ext_supp_rates_len; 833 u8 ext_supp_rates_len;
933 u8 wmm_info_len; 834 u8 wmm_info_len;
934 u8 wmm_param_len; 835 u8 wmm_param_len;
935 u8 mesh_config_len;
936 u8 mesh_id_len; 836 u8 mesh_id_len;
937 u8 peer_link_len; 837 u8 peer_link_len;
938 u8 preq_len; 838 u8 preq_len;
@@ -1055,6 +955,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1055netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, 955netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1056 struct net_device *dev); 956 struct net_device *dev);
1057 957
958/*
959 * radiotap header for status frames
960 */
961struct ieee80211_tx_status_rtap_hdr {
962 struct ieee80211_radiotap_header hdr;
963 u8 rate;
964 u8 padding_for_rate;
965 __le16 tx_flags;
966 u8 data_retries;
967} __attribute__ ((packed));
968
969
1058/* HT */ 970/* HT */
1059void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 971void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
1060 struct ieee80211_ht_cap *ht_cap_ie, 972 struct ieee80211_ht_cap *ht_cap_ie,
@@ -1083,6 +995,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
1083 995
1084int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 996int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1085 enum ieee80211_back_parties initiator); 997 enum ieee80211_back_parties initiator);
998int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
999 enum ieee80211_back_parties initiator);
1086 1000
1087/* Spectrum management */ 1001/* Spectrum management */
1088void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1002void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1122,8 +1036,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
1122 struct ieee80211_hdr *hdr, const u8 *tsc, 1036 struct ieee80211_hdr *hdr, const u8 *tsc,
1123 gfp_t gfp); 1037 gfp_t gfp);
1124void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); 1038void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
1125void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, 1039void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1126 int encrypt);
1127void ieee802_11_parse_elems(u8 *start, size_t len, 1040void ieee802_11_parse_elems(u8 *start, size_t len,
1128 struct ieee802_11_elems *elems); 1041 struct ieee802_11_elems *elems);
1129u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, 1042u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
@@ -1160,7 +1073,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1160 u8 *extra, size_t extra_len, const u8 *bssid, 1073 u8 *extra, size_t extra_len, const u8 *bssid,
1161 const u8 *key, u8 key_len, u8 key_idx); 1074 const u8 *key, u8 key_len, u8 key_idx);
1162int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1075int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1163 const u8 *ie, size_t ie_len); 1076 const u8 *ie, size_t ie_len,
1077 enum ieee80211_band band);
1164void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1078void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1165 const u8 *ssid, size_t ssid_len, 1079 const u8 *ssid, size_t ssid_len,
1166 const u8 *ie, size_t ie_len); 1080 const u8 *ie, size_t ie_len);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 14f10eb91c5c..1bf12a26b45e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -214,8 +214,8 @@ static int ieee80211_open(struct net_device *dev)
214 /* must be before the call to ieee80211_configure_filter */ 214 /* must be before the call to ieee80211_configure_filter */
215 local->monitors++; 215 local->monitors++;
216 if (local->monitors == 1) { 216 if (local->monitors == 1) {
217 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; 217 local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
218 hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP; 218 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
219 } 219 }
220 220
221 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) 221 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
@@ -435,8 +435,8 @@ static int ieee80211_stop(struct net_device *dev)
435 435
436 local->monitors--; 436 local->monitors--;
437 if (local->monitors == 0) { 437 if (local->monitors == 0) {
438 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; 438 local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
439 hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP; 439 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
440 } 440 }
441 441
442 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) 442 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
@@ -752,6 +752,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
752 ieee80211_mandatory_rates(sdata->local, 752 ieee80211_mandatory_rates(sdata->local,
753 sdata->local->hw.conf.channel->band); 753 sdata->local->hw.conf.channel->band);
754 sdata->drop_unencrypted = 0; 754 sdata->drop_unencrypted = 0;
755 if (type == NL80211_IFTYPE_STATION)
756 sdata->u.mgd.use_4addr = false;
755 757
756 return 0; 758 return 0;
757} 759}
@@ -809,6 +811,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
809 /* setup type-dependent data */ 811 /* setup type-dependent data */
810 ieee80211_setup_sdata(sdata, type); 812 ieee80211_setup_sdata(sdata, type);
811 813
814 if (params) {
815 ndev->ieee80211_ptr->use_4addr = params->use_4addr;
816 if (type == NL80211_IFTYPE_STATION)
817 sdata->u.mgd.use_4addr = params->use_4addr;
818 }
819
812 ret = register_netdevice(ndev); 820 ret = register_netdevice(ndev);
813 if (ret) 821 if (ret)
814 goto fail; 822 goto fail;
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 9572e00f532c..a49f93b79e92 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -118,18 +118,6 @@ struct ieee80211_key {
118 struct { 118 struct {
119 struct dentry *stalink; 119 struct dentry *stalink;
120 struct dentry *dir; 120 struct dentry *dir;
121 struct dentry *keylen;
122 struct dentry *flags;
123 struct dentry *keyidx;
124 struct dentry *hw_key_idx;
125 struct dentry *tx_rx_count;
126 struct dentry *algorithm;
127 struct dentry *tx_spec;
128 struct dentry *rx_spec;
129 struct dentry *replays;
130 struct dentry *icverrors;
131 struct dentry *key;
132 struct dentry *ifindex;
133 int cnt; 121 int cnt;
134 } debugfs; 122 } debugfs;
135#endif 123#endif
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 797f53942e5f..8116d1a96a4a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <net/mac80211.h> 11#include <net/mac80211.h>
12#include <net/ieee80211_radiotap.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
@@ -30,26 +29,11 @@
30#include "rate.h" 29#include "rate.h"
31#include "mesh.h" 30#include "mesh.h"
32#include "wep.h" 31#include "wep.h"
33#include "wme.h"
34#include "aes_ccm.h"
35#include "led.h" 32#include "led.h"
36#include "cfg.h" 33#include "cfg.h"
37#include "debugfs.h" 34#include "debugfs.h"
38#include "debugfs_netdev.h" 35#include "debugfs_netdev.h"
39 36
40/*
41 * For seeing transmitted packets on monitor interfaces
42 * we have a radiotap header too.
43 */
44struct ieee80211_tx_status_rtap_hdr {
45 struct ieee80211_radiotap_header hdr;
46 u8 rate;
47 u8 padding_for_rate;
48 __le16 tx_flags;
49 u8 data_retries;
50} __attribute__ ((packed));
51
52
53void ieee80211_configure_filter(struct ieee80211_local *local) 37void ieee80211_configure_filter(struct ieee80211_local *local)
54{ 38{
55 u64 mc; 39 u64 mc;
@@ -253,28 +237,6 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
253 BSS_CHANGED_ERP_SLOT; 237 BSS_CHANGED_ERP_SLOT;
254} 238}
255 239
256void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
257 struct sk_buff *skb)
258{
259 struct ieee80211_local *local = hw_to_local(hw);
260 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
261 int tmp;
262
263 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
264 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
265 &local->skb_queue : &local->skb_queue_unreliable, skb);
266 tmp = skb_queue_len(&local->skb_queue) +
267 skb_queue_len(&local->skb_queue_unreliable);
268 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
269 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
270 dev_kfree_skb_irq(skb);
271 tmp--;
272 I802_DEBUG_INC(local->tx_status_drop);
273 }
274 tasklet_schedule(&local->tasklet);
275}
276EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
277
278static void ieee80211_tasklet_handler(unsigned long data) 240static void ieee80211_tasklet_handler(unsigned long data)
279{ 241{
280 struct ieee80211_local *local = (struct ieee80211_local *) data; 242 struct ieee80211_local *local = (struct ieee80211_local *) data;
@@ -296,14 +258,14 @@ static void ieee80211_tasklet_handler(unsigned long data)
296 break; 258 break;
297 case IEEE80211_DELBA_MSG: 259 case IEEE80211_DELBA_MSG:
298 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 260 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
299 ieee80211_stop_tx_ba_cb(local_to_hw(local), 261 ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
300 ra_tid->ra, ra_tid->tid); 262 ra_tid->tid);
301 dev_kfree_skb(skb); 263 dev_kfree_skb(skb);
302 break; 264 break;
303 case IEEE80211_ADDBA_MSG: 265 case IEEE80211_ADDBA_MSG:
304 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 266 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
305 ieee80211_start_tx_ba_cb(local_to_hw(local), 267 ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
306 ra_tid->ra, ra_tid->tid); 268 ra_tid->tid);
307 dev_kfree_skb(skb); 269 dev_kfree_skb(skb);
308 break ; 270 break ;
309 default: 271 default:
@@ -315,299 +277,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
315 } 277 }
316} 278}
317 279
318static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
319 struct sta_info *sta,
320 struct sk_buff *skb)
321{
322 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
323
324 /*
325 * XXX: This is temporary!
326 *
327 * The problem here is that when we get here, the driver will
328 * quite likely have pretty much overwritten info->control by
329 * using info->driver_data or info->rate_driver_data. Thus,
330 * when passing out the frame to the driver again, we would be
331 * passing completely bogus data since the driver would then
332 * expect a properly filled info->control. In mac80211 itself
333 * the same problem occurs, since we need info->control.vif
334 * internally.
335 *
336 * To fix this, we should send the frame through TX processing
337 * again. However, it's not that simple, since the frame will
338 * have been software-encrypted (if applicable) already, and
339 * encrypting it again doesn't do much good. So to properly do
340 * that, we not only have to skip the actual 'raw' encryption
341 * (key selection etc. still has to be done!) but also the
342 * sequence number assignment since that impacts the crypto
343 * encapsulation, of course.
344 *
345 * Hence, for now, fix the bug by just dropping the frame.
346 */
347 goto drop;
348
349 sta->tx_filtered_count++;
350
351 /*
352 * Clear the TX filter mask for this STA when sending the next
353 * packet. If the STA went to power save mode, this will happen
354 * when it wakes up for the next time.
355 */
356 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
357
358 /*
359 * This code races in the following way:
360 *
361 * (1) STA sends frame indicating it will go to sleep and does so
362 * (2) hardware/firmware adds STA to filter list, passes frame up
363 * (3) hardware/firmware processes TX fifo and suppresses a frame
364 * (4) we get TX status before having processed the frame and
365 * knowing that the STA has gone to sleep.
366 *
367 * This is actually quite unlikely even when both those events are
368 * processed from interrupts coming in quickly after one another or
369 * even at the same time because we queue both TX status events and
370 * RX frames to be processed by a tasklet and process them in the
371 * same order that they were received or TX status last. Hence, there
372 * is no race as long as the frame RX is processed before the next TX
373 * status, which drivers can ensure, see below.
374 *
375 * Note that this can only happen if the hardware or firmware can
376 * actually add STAs to the filter list, if this is done by the
377 * driver in response to set_tim() (which will only reduce the race
378 * this whole filtering tries to solve, not completely solve it)
379 * this situation cannot happen.
380 *
381 * To completely solve this race drivers need to make sure that they
382 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
383 * functions and
384 * (b) always process RX events before TX status events if ordering
385 * can be unknown, for example with different interrupt status
386 * bits.
387 */
388 if (test_sta_flags(sta, WLAN_STA_PS) &&
389 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
390 skb_queue_tail(&sta->tx_filtered, skb);
391 return;
392 }
393
394 if (!test_sta_flags(sta, WLAN_STA_PS) &&
395 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
396 /* Software retry the packet once */
397 info->flags |= IEEE80211_TX_INTFL_RETRIED;
398 ieee80211_add_pending_skb(local, skb);
399 return;
400 }
401
402 drop:
403#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
404 if (net_ratelimit())
405 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
406 "queue_len=%d PS=%d @%lu\n",
407 wiphy_name(local->hw.wiphy),
408 skb_queue_len(&sta->tx_filtered),
409 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
410#endif
411 dev_kfree_skb(skb);
412}
413
414void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
415{
416 struct sk_buff *skb2;
417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
418 struct ieee80211_local *local = hw_to_local(hw);
419 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
420 u16 frag, type;
421 __le16 fc;
422 struct ieee80211_supported_band *sband;
423 struct ieee80211_tx_status_rtap_hdr *rthdr;
424 struct ieee80211_sub_if_data *sdata;
425 struct net_device *prev_dev = NULL;
426 struct sta_info *sta;
427 int retry_count = -1, i;
428
429 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
430 /* the HW cannot have attempted that rate */
431 if (i >= hw->max_rates) {
432 info->status.rates[i].idx = -1;
433 info->status.rates[i].count = 0;
434 }
435
436 retry_count += info->status.rates[i].count;
437 }
438 if (retry_count < 0)
439 retry_count = 0;
440
441 rcu_read_lock();
442
443 sband = local->hw.wiphy->bands[info->band];
444
445 sta = sta_info_get(local, hdr->addr1);
446
447 if (sta) {
448 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
449 test_sta_flags(sta, WLAN_STA_PS)) {
450 /*
451 * The STA is in power save mode, so assume
452 * that this TX packet failed because of that.
453 */
454 ieee80211_handle_filtered_frame(local, sta, skb);
455 rcu_read_unlock();
456 return;
457 }
458
459 fc = hdr->frame_control;
460
461 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
462 (ieee80211_is_data_qos(fc))) {
463 u16 tid, ssn;
464 u8 *qc;
465
466 qc = ieee80211_get_qos_ctl(hdr);
467 tid = qc[0] & 0xf;
468 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
469 & IEEE80211_SCTL_SEQ);
470 ieee80211_send_bar(sta->sdata, hdr->addr1,
471 tid, ssn);
472 }
473
474 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
475 ieee80211_handle_filtered_frame(local, sta, skb);
476 rcu_read_unlock();
477 return;
478 } else {
479 if (!(info->flags & IEEE80211_TX_STAT_ACK))
480 sta->tx_retry_failed++;
481 sta->tx_retry_count += retry_count;
482 }
483
484 rate_control_tx_status(local, sband, sta, skb);
485 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
486 ieee80211s_update_metric(local, sta, skb);
487 }
488
489 rcu_read_unlock();
490
491 ieee80211_led_tx(local, 0);
492
493 /* SNMP counters
494 * Fragments are passed to low-level drivers as separate skbs, so these
495 * are actually fragments, not frames. Update frame counters only for
496 * the first fragment of the frame. */
497
498 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
499 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
500
501 if (info->flags & IEEE80211_TX_STAT_ACK) {
502 if (frag == 0) {
503 local->dot11TransmittedFrameCount++;
504 if (is_multicast_ether_addr(hdr->addr1))
505 local->dot11MulticastTransmittedFrameCount++;
506 if (retry_count > 0)
507 local->dot11RetryCount++;
508 if (retry_count > 1)
509 local->dot11MultipleRetryCount++;
510 }
511
512 /* This counter shall be incremented for an acknowledged MPDU
513 * with an individual address in the address 1 field or an MPDU
514 * with a multicast address in the address 1 field of type Data
515 * or Management. */
516 if (!is_multicast_ether_addr(hdr->addr1) ||
517 type == IEEE80211_FTYPE_DATA ||
518 type == IEEE80211_FTYPE_MGMT)
519 local->dot11TransmittedFragmentCount++;
520 } else {
521 if (frag == 0)
522 local->dot11FailedCount++;
523 }
524
525 /* this was a transmitted frame, but now we want to reuse it */
526 skb_orphan(skb);
527
528 /*
529 * This is a bit racy but we can avoid a lot of work
530 * with this test...
531 */
532 if (!local->monitors && !local->cooked_mntrs) {
533 dev_kfree_skb(skb);
534 return;
535 }
536
537 /* send frame to monitor interfaces now */
538
539 if (skb_headroom(skb) < sizeof(*rthdr)) {
540 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
541 dev_kfree_skb(skb);
542 return;
543 }
544
545 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
546 skb_push(skb, sizeof(*rthdr));
547
548 memset(rthdr, 0, sizeof(*rthdr));
549 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
550 rthdr->hdr.it_present =
551 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
552 (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
553 (1 << IEEE80211_RADIOTAP_RATE));
554
555 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
556 !is_multicast_ether_addr(hdr->addr1))
557 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
558
559 /*
560 * XXX: Once radiotap gets the bitmap reset thing the vendor
561 * extensions proposal contains, we can actually report
562 * the whole set of tries we did.
563 */
564 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
565 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
566 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
567 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
568 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
569 if (info->status.rates[0].idx >= 0 &&
570 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
571 rthdr->rate = sband->bitrates[
572 info->status.rates[0].idx].bitrate / 5;
573
574 /* for now report the total retry_count */
575 rthdr->data_retries = retry_count;
576
577 /* XXX: is this sufficient for BPF? */
578 skb_set_mac_header(skb, 0);
579 skb->ip_summed = CHECKSUM_UNNECESSARY;
580 skb->pkt_type = PACKET_OTHERHOST;
581 skb->protocol = htons(ETH_P_802_2);
582 memset(skb->cb, 0, sizeof(skb->cb));
583
584 rcu_read_lock();
585 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
586 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
587 if (!netif_running(sdata->dev))
588 continue;
589
590 if (prev_dev) {
591 skb2 = skb_clone(skb, GFP_ATOMIC);
592 if (skb2) {
593 skb2->dev = prev_dev;
594 netif_rx(skb2);
595 }
596 }
597
598 prev_dev = sdata->dev;
599 }
600 }
601 if (prev_dev) {
602 skb->dev = prev_dev;
603 netif_rx(skb);
604 skb = NULL;
605 }
606 rcu_read_unlock();
607 dev_kfree_skb(skb);
608}
609EXPORT_SYMBOL(ieee80211_tx_status);
610
611static void ieee80211_restart_work(struct work_struct *work) 280static void ieee80211_restart_work(struct work_struct *work)
612{ 281{
613 struct ieee80211_local *local = 282 struct ieee80211_local *local =
@@ -659,7 +328,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
659 if (!wiphy) 328 if (!wiphy)
660 return NULL; 329 return NULL;
661 330
662 wiphy->netnsok = true; 331 wiphy->flags |= WIPHY_FLAG_NETNS_OK |
332 WIPHY_FLAG_4ADDR_AP |
333 WIPHY_FLAG_4ADDR_STATION;
663 wiphy->privid = mac80211_wiphy_privid; 334 wiphy->privid = mac80211_wiphy_privid;
664 335
665 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 336 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
@@ -901,6 +572,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
901 i++; 572 i++;
902 } 573 }
903 } 574 }
575 local->int_scan_req->n_channels = i;
904 576
905 local->network_latency_notifier.notifier_call = 577 local->network_latency_notifier.notifier_call =
906 ieee80211_max_network_latency; 578 ieee80211_max_network_latency;
@@ -923,7 +595,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
923 fail_wep: 595 fail_wep:
924 sta_info_stop(local); 596 sta_info_stop(local);
925 fail_sta_info: 597 fail_sta_info:
926 debugfs_hw_del(local);
927 destroy_workqueue(local->workqueue); 598 destroy_workqueue(local->workqueue);
928 fail_workqueue: 599 fail_workqueue:
929 wiphy_unregister(local->hw.wiphy); 600 wiphy_unregister(local->hw.wiphy);
@@ -959,10 +630,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
959 ieee80211_clear_tx_pending(local); 630 ieee80211_clear_tx_pending(local);
960 sta_info_stop(local); 631 sta_info_stop(local);
961 rate_control_deinitialize(local); 632 rate_control_deinitialize(local);
962 debugfs_hw_del(local);
963 633
964 if (skb_queue_len(&local->skb_queue) 634 if (skb_queue_len(&local->skb_queue) ||
965 || skb_queue_len(&local->skb_queue_unreliable)) 635 skb_queue_len(&local->skb_queue_unreliable))
966 printk(KERN_WARNING "%s: skb_queue not empty\n", 636 printk(KERN_WARNING "%s: skb_queue not empty\n",
967 wiphy_name(local->hw.wiphy)); 637 wiphy_name(local->hw.wiphy));
968 skb_queue_purge(&local->skb_queue); 638 skb_queue_purge(&local->skb_queue);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9a733890eb47..c0fe46493f71 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 open80211s Ltd. 2 * Copyright (c) 2008, 2009 open80211s Ltd.
3 * Authors: Luis Carlos Cobo <luisca@cozybit.com> 3 * Authors: Luis Carlos Cobo <luisca@cozybit.com>
4 * Javier Cardona <javier@cozybit.com> 4 * Javier Cardona <javier@cozybit.com>
5 * 5 *
@@ -14,18 +14,14 @@
14 14
15#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) 15#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
16#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) 16#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
17#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
17 18
18#define PP_OFFSET 1 /* Path Selection Protocol */ 19#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
19#define PM_OFFSET 5 /* Path Selection Metric */ 20#define MESHCONF_CAPAB_FORWARDING 0x08
20#define CC_OFFSET 9 /* Congestion Control Mode */
21#define SP_OFFSET 13 /* Synchronization Protocol */
22#define AUTH_OFFSET 17 /* Authentication Protocol */
23#define CAPAB_OFFSET 22
24#define CAPAB_ACCEPT_PLINKS 0x80
25#define CAPAB_FORWARDING 0x10
26 21
27#define TMR_RUNNING_HK 0 22#define TMR_RUNNING_HK 0
28#define TMR_RUNNING_MP 1 23#define TMR_RUNNING_MP 1
24#define TMR_RUNNING_MPR 2
29 25
30int mesh_allocated; 26int mesh_allocated;
31static struct kmem_cache *rm_cache; 27static struct kmem_cache *rm_cache;
@@ -85,11 +81,11 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
85 */ 81 */
86 if (ifmsh->mesh_id_len == ie->mesh_id_len && 82 if (ifmsh->mesh_id_len == ie->mesh_id_len &&
87 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 83 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
88 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && 84 (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
89 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && 85 (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
90 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 && 86 (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
91 memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 && 87 (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
92 memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0) 88 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
93 return true; 89 return true;
94 90
95 return false; 91 return false;
@@ -102,7 +98,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
102 */ 98 */
103bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) 99bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
104{ 100{
105 return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0; 101 return (ie->mesh_config->meshconf_cap &
102 MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
106} 103}
107 104
108/** 105/**
@@ -128,18 +125,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
128 125
129void mesh_ids_set_default(struct ieee80211_if_mesh *sta) 126void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
130{ 127{
131 u8 oui[3] = {0x00, 0x0F, 0xAC}; 128 sta->mesh_pp_id = 0; /* HWMP */
132 129 sta->mesh_pm_id = 0; /* Airtime */
133 memcpy(sta->mesh_pp_id, oui, sizeof(oui)); 130 sta->mesh_cc_id = 0; /* Disabled */
134 memcpy(sta->mesh_pm_id, oui, sizeof(oui)); 131 sta->mesh_sp_id = 0; /* Neighbor Offset */
135 memcpy(sta->mesh_cc_id, oui, sizeof(oui)); 132 sta->mesh_auth_id = 0; /* Disabled */
136 memcpy(sta->mesh_sp_id, oui, sizeof(oui));
137 memcpy(sta->mesh_auth_id, oui, sizeof(oui));
138 sta->mesh_pp_id[sizeof(oui)] = 0;
139 sta->mesh_pm_id[sizeof(oui)] = 0;
140 sta->mesh_cc_id[sizeof(oui)] = 0xff;
141 sta->mesh_sp_id[sizeof(oui)] = 0xff;
142 sta->mesh_auth_id[sizeof(oui)] = 0x0;
143} 133}
144 134
145int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) 135int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -205,8 +195,8 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
205 list_del(&p->list); 195 list_del(&p->list);
206 kmem_cache_free(rm_cache, p); 196 kmem_cache_free(rm_cache, p);
207 --entries; 197 --entries;
208 } else if ((seqnum == p->seqnum) 198 } else if ((seqnum == p->seqnum) &&
209 && (memcmp(sa, p->sa, ETH_ALEN) == 0)) 199 (memcmp(sa, p->sa, ETH_ALEN) == 0))
210 return -1; 200 return -1;
211 } 201 }
212 202
@@ -228,6 +218,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
228 struct ieee80211_supported_band *sband; 218 struct ieee80211_supported_band *sband;
229 u8 *pos; 219 u8 *pos;
230 int len, i, rate; 220 int len, i, rate;
221 u8 neighbors;
231 222
232 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 223 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
233 len = sband->n_bitrates; 224 len = sband->n_bitrates;
@@ -251,46 +242,49 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
251 } 242 }
252 } 243 }
253 244
245 if (sband->band == IEEE80211_BAND_2GHZ) {
246 pos = skb_put(skb, 2 + 1);
247 *pos++ = WLAN_EID_DS_PARAMS;
248 *pos++ = 1;
249 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
250 }
251
254 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); 252 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
255 *pos++ = WLAN_EID_MESH_ID; 253 *pos++ = WLAN_EID_MESH_ID;
256 *pos++ = sdata->u.mesh.mesh_id_len; 254 *pos++ = sdata->u.mesh.mesh_id_len;
257 if (sdata->u.mesh.mesh_id_len) 255 if (sdata->u.mesh.mesh_id_len)
258 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); 256 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
259 257
260 pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN); 258 pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
261 *pos++ = WLAN_EID_MESH_CONFIG; 259 *pos++ = WLAN_EID_MESH_CONFIG;
262 *pos++ = IEEE80211_MESH_CONFIG_LEN; 260 *pos++ = sizeof(struct ieee80211_meshconf_ie);
263 /* Version */
264 *pos++ = 1;
265 261
266 /* Active path selection protocol ID */ 262 /* Active path selection protocol ID */
267 memcpy(pos, sdata->u.mesh.mesh_pp_id, 4); 263 *pos++ = sdata->u.mesh.mesh_pp_id;
268 pos += 4;
269 264
270 /* Active path selection metric ID */ 265 /* Active path selection metric ID */
271 memcpy(pos, sdata->u.mesh.mesh_pm_id, 4); 266 *pos++ = sdata->u.mesh.mesh_pm_id;
272 pos += 4;
273 267
274 /* Congestion control mode identifier */ 268 /* Congestion control mode identifier */
275 memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); 269 *pos++ = sdata->u.mesh.mesh_cc_id;
276 pos += 4;
277 270
278 /* Synchronization protocol identifier */ 271 /* Synchronization protocol identifier */
279 memcpy(pos, sdata->u.mesh.mesh_sp_id, 4); 272 *pos++ = sdata->u.mesh.mesh_sp_id;
280 pos += 4;
281 273
282 /* Authentication Protocol identifier */ 274 /* Authentication Protocol identifier */
283 memcpy(pos, sdata->u.mesh.mesh_auth_id, 4); 275 *pos++ = sdata->u.mesh.mesh_auth_id;
284 pos += 4;
285 276
286 /* Mesh Formation Info */ 277 /* Mesh Formation Info - number of neighbors */
287 memset(pos, 0x00, 1); 278 neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
288 pos += 1; 279 /* Number of neighbor mesh STAs or 15 whichever is smaller */
280 neighbors = (neighbors > 15) ? 15 : neighbors;
281 *pos++ = neighbors << 1;
289 282
290 /* Mesh capability */ 283 /* Mesh capability */
291 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); 284 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
292 *pos = CAPAB_FORWARDING; 285 *pos = MESHCONF_CAPAB_FORWARDING;
293 *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00; 286 *pos++ |= sdata->u.mesh.accepting_plinks ?
287 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
294 *pos++ = 0x00; 288 *pos++ = 0x00;
295 289
296 return; 290 return;
@@ -355,6 +349,34 @@ static void ieee80211_mesh_path_timer(unsigned long data)
355 ieee80211_queue_work(&local->hw, &ifmsh->work); 349 ieee80211_queue_work(&local->hw, &ifmsh->work);
356} 350}
357 351
352static void ieee80211_mesh_path_root_timer(unsigned long data)
353{
354 struct ieee80211_sub_if_data *sdata =
355 (struct ieee80211_sub_if_data *) data;
356 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
357 struct ieee80211_local *local = sdata->local;
358
359 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
360
361 if (local->quiescing) {
362 set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
363 return;
364 }
365
366 ieee80211_queue_work(&local->hw, &ifmsh->work);
367}
368
369void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
370{
371 if (ifmsh->mshcfg.dot11MeshHWMPRootMode)
372 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
373 else {
374 clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
375 /* stop running timer */
376 del_timer_sync(&ifmsh->mesh_path_root_timer);
377 }
378}
379
358/** 380/**
359 * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame 381 * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
360 * @hdr: 802.11 frame header 382 * @hdr: 802.11 frame header
@@ -365,8 +387,9 @@ static void ieee80211_mesh_path_timer(unsigned long data)
365 * 387 *
366 * Return the length of the 802.11 (does not include a mesh control header) 388 * Return the length of the 802.11 (does not include a mesh control header)
367 */ 389 */
368int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, char 390int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
369 *meshda, char *meshsa) { 391 const u8 *meshda, const u8 *meshsa)
392{
370 if (is_multicast_ether_addr(meshda)) { 393 if (is_multicast_ether_addr(meshda)) {
371 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 394 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
372 /* DA TA SA */ 395 /* DA TA SA */
@@ -448,6 +471,15 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
448 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 471 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
449} 472}
450 473
474static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
475{
476 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
477
478 mesh_path_tx_root_frame(sdata);
479 mod_timer(&ifmsh->mesh_path_root_timer,
480 round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
481}
482
451#ifdef CONFIG_PM 483#ifdef CONFIG_PM
452void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) 484void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
453{ 485{
@@ -462,6 +494,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
462 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); 494 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
463 if (del_timer_sync(&ifmsh->mesh_path_timer)) 495 if (del_timer_sync(&ifmsh->mesh_path_timer))
464 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); 496 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
497 if (del_timer_sync(&ifmsh->mesh_path_root_timer))
498 set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
465} 499}
466 500
467void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) 501void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
@@ -472,6 +506,9 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
472 add_timer(&ifmsh->housekeeping_timer); 506 add_timer(&ifmsh->housekeeping_timer);
473 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running)) 507 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
474 add_timer(&ifmsh->mesh_path_timer); 508 add_timer(&ifmsh->mesh_path_timer);
509 if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running))
510 add_timer(&ifmsh->mesh_path_root_timer);
511 ieee80211_mesh_root_setup(ifmsh);
475} 512}
476#endif 513#endif
477 514
@@ -481,6 +518,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
481 struct ieee80211_local *local = sdata->local; 518 struct ieee80211_local *local = sdata->local;
482 519
483 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 520 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
521 ieee80211_mesh_root_setup(ifmsh);
484 ieee80211_queue_work(&local->hw, &ifmsh->work); 522 ieee80211_queue_work(&local->hw, &ifmsh->work);
485 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 523 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
486 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 524 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
@@ -491,6 +529,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
491void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 529void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
492{ 530{
493 del_timer_sync(&sdata->u.mesh.housekeeping_timer); 531 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
532 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
494 /* 533 /*
495 * If the timer fired while we waited for it, it will have 534 * If the timer fired while we waited for it, it will have
496 * requeued the work. Now the work will be running again 535 * requeued the work. Now the work will be running again
@@ -561,7 +600,7 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
561 struct ieee80211_rx_status *rx_status) 600 struct ieee80211_rx_status *rx_status)
562{ 601{
563 switch (mgmt->u.action.category) { 602 switch (mgmt->u.action.category) {
564 case PLINK_CATEGORY: 603 case MESH_PLINK_CATEGORY:
565 mesh_rx_plink_frame(sdata, mgmt, len, rx_status); 604 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
566 break; 605 break;
567 case MESH_PATH_SEL_CATEGORY: 606 case MESH_PATH_SEL_CATEGORY:
@@ -628,6 +667,9 @@ static void ieee80211_mesh_work(struct work_struct *work)
628 667
629 if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) 668 if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
630 ieee80211_mesh_housekeeping(sdata, ifmsh); 669 ieee80211_mesh_housekeeping(sdata, ifmsh);
670
671 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
672 ieee80211_mesh_rootpath(sdata);
631} 673}
632 674
633void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) 675void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -673,7 +715,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
673 MESH_MIN_DISCOVERY_TIMEOUT; 715 MESH_MIN_DISCOVERY_TIMEOUT;
674 ifmsh->accepting_plinks = true; 716 ifmsh->accepting_plinks = true;
675 ifmsh->preq_id = 0; 717 ifmsh->preq_id = 0;
676 ifmsh->dsn = 0; 718 ifmsh->sn = 0;
677 atomic_set(&ifmsh->mpaths, 0); 719 atomic_set(&ifmsh->mpaths, 0);
678 mesh_rmc_init(sdata); 720 mesh_rmc_init(sdata);
679 ifmsh->last_preq = jiffies; 721 ifmsh->last_preq = jiffies;
@@ -684,6 +726,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
684 setup_timer(&ifmsh->mesh_path_timer, 726 setup_timer(&ifmsh->mesh_path_timer,
685 ieee80211_mesh_path_timer, 727 ieee80211_mesh_path_timer,
686 (unsigned long) sdata); 728 (unsigned long) sdata);
729 setup_timer(&ifmsh->mesh_path_root_timer,
730 ieee80211_mesh_path_root_timer,
731 (unsigned long) sdata);
687 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 732 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
688 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 733 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
689} 734}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index dd1c19319f0a..31e102541869 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 open80211s Ltd. 2 * Copyright (c) 2008, 2009 open80211s Ltd.
3 * Authors: Luis Carlos Cobo <luisca@cozybit.com> 3 * Authors: Luis Carlos Cobo <luisca@cozybit.com>
4 * Javier Cardona <javier@cozybit.com> 4 * Javier Cardona <javier@cozybit.com>
5 * 5 *
@@ -26,7 +26,7 @@
26 * 26 *
27 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding 27 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
28 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path 28 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
29 * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence 29 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
30 * number 30 * number
31 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be 31 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
32 * modified 32 * modified
@@ -38,7 +38,7 @@
38enum mesh_path_flags { 38enum mesh_path_flags {
39 MESH_PATH_ACTIVE = BIT(0), 39 MESH_PATH_ACTIVE = BIT(0),
40 MESH_PATH_RESOLVING = BIT(1), 40 MESH_PATH_RESOLVING = BIT(1),
41 MESH_PATH_DSN_VALID = BIT(2), 41 MESH_PATH_SN_VALID = BIT(2),
42 MESH_PATH_FIXED = BIT(3), 42 MESH_PATH_FIXED = BIT(3),
43 MESH_PATH_RESOLVED = BIT(4), 43 MESH_PATH_RESOLVED = BIT(4),
44}; 44};
@@ -53,11 +53,13 @@ enum mesh_path_flags {
53 * to grow. 53 * to grow.
54 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to 54 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
55 * grow 55 * grow
56 * @MESH_WORK_ROOT: the mesh root station needs to send a frame
56 */ 57 */
57enum mesh_deferred_task_flags { 58enum mesh_deferred_task_flags {
58 MESH_WORK_HOUSEKEEPING, 59 MESH_WORK_HOUSEKEEPING,
59 MESH_WORK_GROW_MPATH_TABLE, 60 MESH_WORK_GROW_MPATH_TABLE,
60 MESH_WORK_GROW_MPP_TABLE, 61 MESH_WORK_GROW_MPP_TABLE,
62 MESH_WORK_ROOT,
61}; 63};
62 64
63/** 65/**
@@ -70,7 +72,7 @@ enum mesh_deferred_task_flags {
70 * @timer: mesh path discovery timer 72 * @timer: mesh path discovery timer
71 * @frame_queue: pending queue for frames sent to this destination while the 73 * @frame_queue: pending queue for frames sent to this destination while the
72 * path is unresolved 74 * path is unresolved
73 * @dsn: destination sequence number of the destination 75 * @sn: target sequence number
74 * @metric: current metric to this destination 76 * @metric: current metric to this destination
75 * @hop_count: hops to destination 77 * @hop_count: hops to destination
76 * @exp_time: in jiffies, when the path will expire or when it expired 78 * @exp_time: in jiffies, when the path will expire or when it expired
@@ -94,7 +96,7 @@ struct mesh_path {
94 struct timer_list timer; 96 struct timer_list timer;
95 struct sk_buff_head frame_queue; 97 struct sk_buff_head frame_queue;
96 struct rcu_head rcu; 98 struct rcu_head rcu;
97 u32 dsn; 99 u32 sn;
98 u32 metric; 100 u32 metric;
99 u8 hop_count; 101 u8 hop_count;
100 unsigned long exp_time; 102 unsigned long exp_time;
@@ -174,7 +176,7 @@ struct mesh_rmc {
174#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2) 176#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2)
175 177
176/* Default values, timeouts in ms */ 178/* Default values, timeouts in ms */
177#define MESH_TTL 5 179#define MESH_TTL 31
178#define MESH_MAX_RETR 3 180#define MESH_MAX_RETR 3
179#define MESH_RET_T 100 181#define MESH_RET_T 100
180#define MESH_CONF_T 100 182#define MESH_CONF_T 100
@@ -206,13 +208,19 @@ struct mesh_rmc {
206#define MESH_MAX_MPATHS 1024 208#define MESH_MAX_MPATHS 1024
207 209
208/* Pending ANA approval */ 210/* Pending ANA approval */
209#define PLINK_CATEGORY 30 211#define MESH_PLINK_CATEGORY 30
210#define MESH_PATH_SEL_CATEGORY 32 212#define MESH_PATH_SEL_CATEGORY 32
213#define MESH_PATH_SEL_ACTION 0
214
215/* PERR reason codes */
216#define PEER_RCODE_UNSPECIFIED 11
217#define PERR_RCODE_NO_ROUTE 12
218#define PERR_RCODE_DEST_UNREACH 13
211 219
212/* Public interfaces */ 220/* Public interfaces */
213/* Various */ 221/* Various */
214int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 222int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
215 char *da, char *sa); 223 const u8 *da, const u8 *sa);
216int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 224int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
217 struct ieee80211_sub_if_data *sdata, char *addr4, 225 struct ieee80211_sub_if_data *sdata, char *addr4,
218 char *addr5, char *addr6); 226 char *addr5, char *addr6);
@@ -234,6 +242,7 @@ ieee80211_rx_result
234ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 242ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
235void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 243void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
236void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 244void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
245void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
237 246
238/* Mesh paths */ 247/* Mesh paths */
239int mesh_nexthop_lookup(struct sk_buff *skb, 248int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -274,8 +283,8 @@ void mesh_mpp_table_grow(void);
274u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, 283u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
275 struct mesh_table *tbl); 284 struct mesh_table *tbl);
276/* Mesh paths */ 285/* Mesh paths */
277int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, 286int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
278 struct ieee80211_sub_if_data *sdata); 287 const u8 *ra, struct ieee80211_sub_if_data *sdata);
279void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 288void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
280void mesh_path_flush_pending(struct mesh_path *mpath); 289void mesh_path_flush_pending(struct mesh_path *mpath);
281void mesh_path_tx_pending(struct mesh_path *mpath); 290void mesh_path_tx_pending(struct mesh_path *mpath);
@@ -288,6 +297,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
288 struct ieee80211_sub_if_data *sdata); 297 struct ieee80211_sub_if_data *sdata);
289void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 298void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
290void mesh_path_restart(struct ieee80211_sub_if_data *sdata); 299void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
300void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
291 301
292extern int mesh_paths_generation; 302extern int mesh_paths_generation;
293 303
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index e12a786e26b8..833b2f3670c5 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 open80211s Ltd. 2 * Copyright (c) 2008, 2009 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com> 3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -9,6 +9,12 @@
9 9
10#include "mesh.h" 10#include "mesh.h"
11 11
12#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
13#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args)
14#else
15#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
16#endif
17
12#define TEST_FRAME_LEN 8192 18#define TEST_FRAME_LEN 8192
13#define MAX_METRIC 0xffffffff 19#define MAX_METRIC 0xffffffff
14#define ARITH_SHIFT 8 20#define ARITH_SHIFT 8
@@ -21,6 +27,12 @@
21#define MP_F_DO 0x1 27#define MP_F_DO 0x1
22/* Reply and forward */ 28/* Reply and forward */
23#define MP_F_RF 0x2 29#define MP_F_RF 0x2
30/* Unknown Sequence Number */
31#define MP_F_USN 0x01
32/* Reason code Present */
33#define MP_F_RCODE 0x02
34
35static void mesh_queue_preq(struct mesh_path *, u8);
24 36
25static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) 37static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
26{ 38{
@@ -29,6 +41,13 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
29 return get_unaligned_le32(preq_elem + offset); 41 return get_unaligned_le32(preq_elem + offset);
30} 42}
31 43
44static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
45{
46 if (ae)
47 offset += 6;
48 return get_unaligned_le16(preq_elem + offset);
49}
50
32/* HWMP IE processing macros */ 51/* HWMP IE processing macros */
33#define AE_F (1<<6) 52#define AE_F (1<<6)
34#define AE_F_SET(x) (*x & AE_F) 53#define AE_F_SET(x) (*x & AE_F)
@@ -37,30 +56,33 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
37#define PREQ_IE_TTL(x) (*(x + 2)) 56#define PREQ_IE_TTL(x) (*(x + 2))
38#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) 57#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0)
39#define PREQ_IE_ORIG_ADDR(x) (x + 7) 58#define PREQ_IE_ORIG_ADDR(x) (x + 7)
40#define PREQ_IE_ORIG_DSN(x) u32_field_get(x, 13, 0); 59#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0);
41#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)); 60#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x));
42#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)); 61#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x));
43#define PREQ_IE_DST_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) 62#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
44#define PREQ_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) 63#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
45#define PREQ_IE_DST_DSN(x) u32_field_get(x, 33, AE_F_SET(x)); 64#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x));
46 65
47 66
48#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) 67#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
49#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) 68#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
50#define PREP_IE_TTL(x) PREQ_IE_TTL(x) 69#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
51#define PREP_IE_ORIG_ADDR(x) (x + 3) 70#define PREP_IE_ORIG_ADDR(x) (x + 3)
52#define PREP_IE_ORIG_DSN(x) u32_field_get(x, 9, 0); 71#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0);
53#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)); 72#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x));
54#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)); 73#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x));
55#define PREP_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) 74#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
56#define PREP_IE_DST_DSN(x) u32_field_get(x, 27, AE_F_SET(x)); 75#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x));
57 76
58#define PERR_IE_DST_ADDR(x) (x + 2) 77#define PERR_IE_TTL(x) (*(x))
59#define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0); 78#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
79#define PERR_IE_TARGET_ADDR(x) (x + 3)
80#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0);
81#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0);
60 82
61#define MSEC_TO_TU(x) (x*1000/1024) 83#define MSEC_TO_TU(x) (x*1000/1024)
62#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0) 84#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
63#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) 85#define SN_LT(x, y) ((long) (x) - (long) (y) < 0)
64 86
65#define net_traversal_jiffies(s) \ 87#define net_traversal_jiffies(s) \
66 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) 88 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -75,13 +97,17 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
75enum mpath_frame_type { 97enum mpath_frame_type {
76 MPATH_PREQ = 0, 98 MPATH_PREQ = 0,
77 MPATH_PREP, 99 MPATH_PREP,
78 MPATH_PERR 100 MPATH_PERR,
101 MPATH_RANN
79}; 102};
80 103
104static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
105
81static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 106static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
82 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, 107 u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
83 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, 108 __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
84 __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata) 109 __le32 lifetime, __le32 metric, __le32 preq_id,
110 struct ieee80211_sub_if_data *sdata)
85{ 111{
86 struct ieee80211_local *local = sdata->local; 112 struct ieee80211_local *local = sdata->local;
87 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 113 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
@@ -103,21 +129,30 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
103 129
104 memcpy(mgmt->da, da, ETH_ALEN); 130 memcpy(mgmt->da, da, ETH_ALEN);
105 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 131 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
106 /* BSSID is left zeroed, wildcard value */ 132 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
107 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
108 mgmt->u.action.u.mesh_action.action_code = action; 135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
109 136
110 switch (action) { 137 switch (action) {
111 case MPATH_PREQ: 138 case MPATH_PREQ:
139 mhwmp_dbg("sending PREQ to %pM\n", target);
112 ie_len = 37; 140 ie_len = 37;
113 pos = skb_put(skb, 2 + ie_len); 141 pos = skb_put(skb, 2 + ie_len);
114 *pos++ = WLAN_EID_PREQ; 142 *pos++ = WLAN_EID_PREQ;
115 break; 143 break;
116 case MPATH_PREP: 144 case MPATH_PREP:
145 mhwmp_dbg("sending PREP to %pM\n", target);
117 ie_len = 31; 146 ie_len = 31;
118 pos = skb_put(skb, 2 + ie_len); 147 pos = skb_put(skb, 2 + ie_len);
119 *pos++ = WLAN_EID_PREP; 148 *pos++ = WLAN_EID_PREP;
120 break; 149 break;
150 case MPATH_RANN:
151 mhwmp_dbg("sending RANN from %pM\n", orig_addr);
152 ie_len = sizeof(struct ieee80211_rann_ie);
153 pos = skb_put(skb, 2 + ie_len);
154 *pos++ = WLAN_EID_RANN;
155 break;
121 default: 156 default:
122 kfree_skb(skb); 157 kfree_skb(skb);
123 return -ENOTSUPP; 158 return -ENOTSUPP;
@@ -133,34 +168,40 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
133 } 168 }
134 memcpy(pos, orig_addr, ETH_ALEN); 169 memcpy(pos, orig_addr, ETH_ALEN);
135 pos += ETH_ALEN; 170 pos += ETH_ALEN;
136 memcpy(pos, &orig_dsn, 4); 171 memcpy(pos, &orig_sn, 4);
137 pos += 4;
138 memcpy(pos, &lifetime, 4);
139 pos += 4; 172 pos += 4;
173 if (action != MPATH_RANN) {
174 memcpy(pos, &lifetime, 4);
175 pos += 4;
176 }
140 memcpy(pos, &metric, 4); 177 memcpy(pos, &metric, 4);
141 pos += 4; 178 pos += 4;
142 if (action == MPATH_PREQ) { 179 if (action == MPATH_PREQ) {
143 /* destination count */ 180 /* destination count */
144 *pos++ = 1; 181 *pos++ = 1;
145 *pos++ = dst_flags; 182 *pos++ = target_flags;
183 }
184 if (action != MPATH_RANN) {
185 memcpy(pos, target, ETH_ALEN);
186 pos += ETH_ALEN;
187 memcpy(pos, &target_sn, 4);
146 } 188 }
147 memcpy(pos, dst, ETH_ALEN);
148 pos += ETH_ALEN;
149 memcpy(pos, &dst_dsn, 4);
150 189
151 ieee80211_tx_skb(sdata, skb, 1); 190 ieee80211_tx_skb(sdata, skb);
152 return 0; 191 return 0;
153} 192}
154 193
155/** 194/**
156 * mesh_send_path error - Sends a PERR mesh management frame 195 * mesh_send_path error - Sends a PERR mesh management frame
157 * 196 *
158 * @dst: broken destination 197 * @target: broken destination
159 * @dst_dsn: dsn of the broken destination 198 * @target_sn: SN of the broken destination
199 * @target_rcode: reason code for this PERR
160 * @ra: node this frame is addressed to 200 * @ra: node this frame is addressed to
161 */ 201 */
162int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, 202int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
163 struct ieee80211_sub_if_data *sdata) 203 __le16 target_rcode, const u8 *ra,
204 struct ieee80211_sub_if_data *sdata)
164{ 205{
165 struct ieee80211_local *local = sdata->local; 206 struct ieee80211_local *local = sdata->local;
166 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 207 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
@@ -184,20 +225,32 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
184 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 225 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
185 /* BSSID is left zeroed, wildcard value */ 226 /* BSSID is left zeroed, wildcard value */
186 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
187 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; 228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
188 ie_len = 12; 229 ie_len = 15;
189 pos = skb_put(skb, 2 + ie_len); 230 pos = skb_put(skb, 2 + ie_len);
190 *pos++ = WLAN_EID_PERR; 231 *pos++ = WLAN_EID_PERR;
191 *pos++ = ie_len; 232 *pos++ = ie_len;
192 /* mode flags, reserved */ 233 /* ttl */
193 *pos++ = 0; 234 *pos++ = MESH_TTL;
194 /* number of destinations */ 235 /* number of destinations */
195 *pos++ = 1; 236 *pos++ = 1;
196 memcpy(pos, dst, ETH_ALEN); 237 /*
238 * flags bit, bit 1 is unset if we know the sequence number and
239 * bit 2 is set if we have a reason code
240 */
241 *pos = 0;
242 if (!target_sn)
243 *pos |= MP_F_USN;
244 if (target_rcode)
245 *pos |= MP_F_RCODE;
246 pos++;
247 memcpy(pos, target, ETH_ALEN);
197 pos += ETH_ALEN; 248 pos += ETH_ALEN;
198 memcpy(pos, &dst_dsn, 4); 249 memcpy(pos, &target_sn, 4);
250 pos += 4;
251 memcpy(pos, &target_rcode, 2);
199 252
200 ieee80211_tx_skb(sdata, skb, 1); 253 ieee80211_tx_skb(sdata, skb);
201 return 0; 254 return 0;
202} 255}
203 256
@@ -259,7 +312,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
259 * @hwmp_ie: hwmp information element (PREP or PREQ) 312 * @hwmp_ie: hwmp information element (PREP or PREQ)
260 * 313 *
261 * This function updates the path routing information to the originator and the 314 * This function updates the path routing information to the originator and the
262 * transmitter of a HWMP PREQ or PREP fram. 315 * transmitter of a HWMP PREQ or PREP frame.
263 * 316 *
264 * Returns: metric to frame originator or 0 if the frame should not be further 317 * Returns: metric to frame originator or 0 if the frame should not be further
265 * processed 318 * processed
@@ -269,18 +322,17 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
269 */ 322 */
270static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, 323static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
271 struct ieee80211_mgmt *mgmt, 324 struct ieee80211_mgmt *mgmt,
272 u8 *hwmp_ie) 325 u8 *hwmp_ie, enum mpath_frame_type action)
273{ 326{
274 struct ieee80211_local *local = sdata->local; 327 struct ieee80211_local *local = sdata->local;
275 struct mesh_path *mpath; 328 struct mesh_path *mpath;
276 struct sta_info *sta; 329 struct sta_info *sta;
277 bool fresh_info; 330 bool fresh_info;
278 u8 *orig_addr, *ta; 331 u8 *orig_addr, *ta;
279 u32 orig_dsn, orig_metric; 332 u32 orig_sn, orig_metric;
280 unsigned long orig_lifetime, exp_time; 333 unsigned long orig_lifetime, exp_time;
281 u32 last_hop_metric, new_metric; 334 u32 last_hop_metric, new_metric;
282 bool process = true; 335 bool process = true;
283 u8 action = mgmt->u.action.u.mesh_action.action_code;
284 336
285 rcu_read_lock(); 337 rcu_read_lock();
286 sta = sta_info_get(local, mgmt->sa); 338 sta = sta_info_get(local, mgmt->sa);
@@ -296,7 +348,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
296 switch (action) { 348 switch (action) {
297 case MPATH_PREQ: 349 case MPATH_PREQ:
298 orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); 350 orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
299 orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie); 351 orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
300 orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); 352 orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
301 orig_metric = PREQ_IE_METRIC(hwmp_ie); 353 orig_metric = PREQ_IE_METRIC(hwmp_ie);
302 break; 354 break;
@@ -309,7 +361,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
309 * information from both PREQ and PREP frames. 361 * information from both PREQ and PREP frames.
310 */ 362 */
311 orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); 363 orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
312 orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie); 364 orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
313 orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); 365 orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
314 orig_metric = PREP_IE_METRIC(hwmp_ie); 366 orig_metric = PREP_IE_METRIC(hwmp_ie);
315 break; 367 break;
@@ -335,9 +387,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
335 if (mpath->flags & MESH_PATH_FIXED) 387 if (mpath->flags & MESH_PATH_FIXED)
336 fresh_info = false; 388 fresh_info = false;
337 else if ((mpath->flags & MESH_PATH_ACTIVE) && 389 else if ((mpath->flags & MESH_PATH_ACTIVE) &&
338 (mpath->flags & MESH_PATH_DSN_VALID)) { 390 (mpath->flags & MESH_PATH_SN_VALID)) {
339 if (DSN_GT(mpath->dsn, orig_dsn) || 391 if (SN_GT(mpath->sn, orig_sn) ||
340 (mpath->dsn == orig_dsn && 392 (mpath->sn == orig_sn &&
341 action == MPATH_PREQ && 393 action == MPATH_PREQ &&
342 new_metric > mpath->metric)) { 394 new_metric > mpath->metric)) {
343 process = false; 395 process = false;
@@ -356,9 +408,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
356 408
357 if (fresh_info) { 409 if (fresh_info) {
358 mesh_path_assign_nexthop(mpath, sta); 410 mesh_path_assign_nexthop(mpath, sta);
359 mpath->flags |= MESH_PATH_DSN_VALID; 411 mpath->flags |= MESH_PATH_SN_VALID;
360 mpath->metric = new_metric; 412 mpath->metric = new_metric;
361 mpath->dsn = orig_dsn; 413 mpath->sn = orig_sn;
362 mpath->exp_time = time_after(mpath->exp_time, exp_time) 414 mpath->exp_time = time_after(mpath->exp_time, exp_time)
363 ? mpath->exp_time : exp_time; 415 ? mpath->exp_time : exp_time;
364 mesh_path_activate(mpath); 416 mesh_path_activate(mpath);
@@ -397,7 +449,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
397 449
398 if (fresh_info) { 450 if (fresh_info) {
399 mesh_path_assign_nexthop(mpath, sta); 451 mesh_path_assign_nexthop(mpath, sta);
400 mpath->flags &= ~MESH_PATH_DSN_VALID; 452 mpath->flags &= ~MESH_PATH_SN_VALID;
401 mpath->metric = last_hop_metric; 453 mpath->metric = last_hop_metric;
402 mpath->exp_time = time_after(mpath->exp_time, exp_time) 454 mpath->exp_time = time_after(mpath->exp_time, exp_time)
403 ? mpath->exp_time : exp_time; 455 ? mpath->exp_time : exp_time;
@@ -419,44 +471,47 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
419{ 471{
420 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 472 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
421 struct mesh_path *mpath; 473 struct mesh_path *mpath;
422 u8 *dst_addr, *orig_addr; 474 u8 *target_addr, *orig_addr;
423 u8 dst_flags, ttl; 475 u8 target_flags, ttl;
424 u32 orig_dsn, dst_dsn, lifetime; 476 u32 orig_sn, target_sn, lifetime;
425 bool reply = false; 477 bool reply = false;
426 bool forward = true; 478 bool forward = true;
427 479
428 /* Update destination DSN, if present */ 480 /* Update target SN, if present */
429 dst_addr = PREQ_IE_DST_ADDR(preq_elem); 481 target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
430 orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); 482 orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
431 dst_dsn = PREQ_IE_DST_DSN(preq_elem); 483 target_sn = PREQ_IE_TARGET_SN(preq_elem);
432 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); 484 orig_sn = PREQ_IE_ORIG_SN(preq_elem);
433 dst_flags = PREQ_IE_DST_F(preq_elem); 485 target_flags = PREQ_IE_TARGET_F(preq_elem);
434 486
435 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 487 mhwmp_dbg("received PREQ from %pM\n", orig_addr);
488
489 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n");
436 forward = false; 491 forward = false;
437 reply = true; 492 reply = true;
438 metric = 0; 493 metric = 0;
439 if (time_after(jiffies, ifmsh->last_dsn_update + 494 if (time_after(jiffies, ifmsh->last_sn_update +
440 net_traversal_jiffies(sdata)) || 495 net_traversal_jiffies(sdata)) ||
441 time_before(jiffies, ifmsh->last_dsn_update)) { 496 time_before(jiffies, ifmsh->last_sn_update)) {
442 dst_dsn = ++ifmsh->dsn; 497 target_sn = ++ifmsh->sn;
443 ifmsh->last_dsn_update = jiffies; 498 ifmsh->last_sn_update = jiffies;
444 } 499 }
445 } else { 500 } else {
446 rcu_read_lock(); 501 rcu_read_lock();
447 mpath = mesh_path_lookup(dst_addr, sdata); 502 mpath = mesh_path_lookup(target_addr, sdata);
448 if (mpath) { 503 if (mpath) {
449 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || 504 if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
450 DSN_LT(mpath->dsn, dst_dsn)) { 505 SN_LT(mpath->sn, target_sn)) {
451 mpath->dsn = dst_dsn; 506 mpath->sn = target_sn;
452 mpath->flags |= MESH_PATH_DSN_VALID; 507 mpath->flags |= MESH_PATH_SN_VALID;
453 } else if ((!(dst_flags & MP_F_DO)) && 508 } else if ((!(target_flags & MP_F_DO)) &&
454 (mpath->flags & MESH_PATH_ACTIVE)) { 509 (mpath->flags & MESH_PATH_ACTIVE)) {
455 reply = true; 510 reply = true;
456 metric = mpath->metric; 511 metric = mpath->metric;
457 dst_dsn = mpath->dsn; 512 target_sn = mpath->sn;
458 if (dst_flags & MP_F_RF) 513 if (target_flags & MP_F_RF)
459 dst_flags |= MP_F_DO; 514 target_flags |= MP_F_DO;
460 else 515 else
461 forward = false; 516 forward = false;
462 } 517 }
@@ -467,13 +522,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
467 if (reply) { 522 if (reply) {
468 lifetime = PREQ_IE_LIFETIME(preq_elem); 523 lifetime = PREQ_IE_LIFETIME(preq_elem);
469 ttl = ifmsh->mshcfg.dot11MeshTTL; 524 ttl = ifmsh->mshcfg.dot11MeshTTL;
470 if (ttl != 0) 525 if (ttl != 0) {
471 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, 526 mhwmp_dbg("replying to the PREQ\n");
472 cpu_to_le32(dst_dsn), 0, orig_addr, 527 mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
473 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, 528 cpu_to_le32(target_sn), 0, orig_addr,
529 cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
474 cpu_to_le32(lifetime), cpu_to_le32(metric), 530 cpu_to_le32(lifetime), cpu_to_le32(metric),
475 0, sdata); 531 0, sdata);
476 else 532 } else
477 ifmsh->mshstats.dropped_frames_ttl++; 533 ifmsh->mshstats.dropped_frames_ttl++;
478 } 534 }
479 535
@@ -487,13 +543,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
487 ifmsh->mshstats.dropped_frames_ttl++; 543 ifmsh->mshstats.dropped_frames_ttl++;
488 return; 544 return;
489 } 545 }
546 mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr);
490 --ttl; 547 --ttl;
491 flags = PREQ_IE_FLAGS(preq_elem); 548 flags = PREQ_IE_FLAGS(preq_elem);
492 preq_id = PREQ_IE_PREQ_ID(preq_elem); 549 preq_id = PREQ_IE_PREQ_ID(preq_elem);
493 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 550 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
494 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 551 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
495 cpu_to_le32(orig_dsn), dst_flags, dst_addr, 552 cpu_to_le32(orig_sn), target_flags, target_addr,
496 cpu_to_le32(dst_dsn), sdata->dev->broadcast, 553 cpu_to_le32(target_sn), broadcast_addr,
497 hopcount, ttl, cpu_to_le32(lifetime), 554 hopcount, ttl, cpu_to_le32(lifetime),
498 cpu_to_le32(metric), cpu_to_le32(preq_id), 555 cpu_to_le32(metric), cpu_to_le32(preq_id),
499 sdata); 556 sdata);
@@ -508,10 +565,12 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
508 u8 *prep_elem, u32 metric) 565 u8 *prep_elem, u32 metric)
509{ 566{
510 struct mesh_path *mpath; 567 struct mesh_path *mpath;
511 u8 *dst_addr, *orig_addr; 568 u8 *target_addr, *orig_addr;
512 u8 ttl, hopcount, flags; 569 u8 ttl, hopcount, flags;
513 u8 next_hop[ETH_ALEN]; 570 u8 next_hop[ETH_ALEN];
514 u32 dst_dsn, orig_dsn, lifetime; 571 u32 target_sn, orig_sn, lifetime;
572
573 mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem));
515 574
516 /* Note that we divert from the draft nomenclature and denominate 575 /* Note that we divert from the draft nomenclature and denominate
517 * destination to what the draft refers to as origininator. So in this 576 * destination to what the draft refers to as origininator. So in this
@@ -519,8 +578,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
519 * which corresponds with the originator of the PREQ which this PREP 578 * which corresponds with the originator of the PREQ which this PREP
520 * replies 579 * replies
521 */ 580 */
522 dst_addr = PREP_IE_DST_ADDR(prep_elem); 581 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
523 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) 582 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
524 /* destination, no forwarding required */ 583 /* destination, no forwarding required */
525 return; 584 return;
526 585
@@ -531,7 +590,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
531 } 590 }
532 591
533 rcu_read_lock(); 592 rcu_read_lock();
534 mpath = mesh_path_lookup(dst_addr, sdata); 593 mpath = mesh_path_lookup(target_addr, sdata);
535 if (mpath) 594 if (mpath)
536 spin_lock_bh(&mpath->state_lock); 595 spin_lock_bh(&mpath->state_lock);
537 else 596 else
@@ -547,13 +606,13 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
547 lifetime = PREP_IE_LIFETIME(prep_elem); 606 lifetime = PREP_IE_LIFETIME(prep_elem);
548 hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; 607 hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
549 orig_addr = PREP_IE_ORIG_ADDR(prep_elem); 608 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
550 dst_dsn = PREP_IE_DST_DSN(prep_elem); 609 target_sn = PREP_IE_TARGET_SN(prep_elem);
551 orig_dsn = PREP_IE_ORIG_DSN(prep_elem); 610 orig_sn = PREP_IE_ORIG_SN(prep_elem);
552 611
553 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 612 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
554 cpu_to_le32(orig_dsn), 0, dst_addr, 613 cpu_to_le32(orig_sn), 0, target_addr,
555 cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl, 614 cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
556 cpu_to_le32(lifetime), cpu_to_le32(metric), 615 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
557 0, sdata); 616 0, sdata);
558 rcu_read_unlock(); 617 rcu_read_unlock();
559 618
@@ -570,32 +629,96 @@ fail:
570static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, 629static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
571 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 630 struct ieee80211_mgmt *mgmt, u8 *perr_elem)
572{ 631{
632 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
573 struct mesh_path *mpath; 633 struct mesh_path *mpath;
574 u8 *ta, *dst_addr; 634 u8 ttl;
575 u32 dst_dsn; 635 u8 *ta, *target_addr;
636 u8 target_flags;
637 u32 target_sn;
638 u16 target_rcode;
576 639
577 ta = mgmt->sa; 640 ta = mgmt->sa;
578 dst_addr = PERR_IE_DST_ADDR(perr_elem); 641 ttl = PERR_IE_TTL(perr_elem);
579 dst_dsn = PERR_IE_DST_DSN(perr_elem); 642 if (ttl <= 1) {
643 ifmsh->mshstats.dropped_frames_ttl++;
644 return;
645 }
646 ttl--;
647 target_flags = PERR_IE_TARGET_FLAGS(perr_elem);
648 target_addr = PERR_IE_TARGET_ADDR(perr_elem);
649 target_sn = PERR_IE_TARGET_SN(perr_elem);
650 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
651
580 rcu_read_lock(); 652 rcu_read_lock();
581 mpath = mesh_path_lookup(dst_addr, sdata); 653 mpath = mesh_path_lookup(target_addr, sdata);
582 if (mpath) { 654 if (mpath) {
583 spin_lock_bh(&mpath->state_lock); 655 spin_lock_bh(&mpath->state_lock);
584 if (mpath->flags & MESH_PATH_ACTIVE && 656 if (mpath->flags & MESH_PATH_ACTIVE &&
585 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && 657 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
586 (!(mpath->flags & MESH_PATH_DSN_VALID) || 658 (!(mpath->flags & MESH_PATH_SN_VALID) ||
587 DSN_GT(dst_dsn, mpath->dsn))) { 659 SN_GT(target_sn, mpath->sn))) {
588 mpath->flags &= ~MESH_PATH_ACTIVE; 660 mpath->flags &= ~MESH_PATH_ACTIVE;
589 mpath->dsn = dst_dsn; 661 mpath->sn = target_sn;
590 spin_unlock_bh(&mpath->state_lock); 662 spin_unlock_bh(&mpath->state_lock);
591 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), 663 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
592 sdata->dev->broadcast, sdata); 664 cpu_to_le16(target_rcode),
665 broadcast_addr, sdata);
593 } else 666 } else
594 spin_unlock_bh(&mpath->state_lock); 667 spin_unlock_bh(&mpath->state_lock);
595 } 668 }
596 rcu_read_unlock(); 669 rcu_read_unlock();
597} 670}
598 671
672static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
673 struct ieee80211_mgmt *mgmt,
674 struct ieee80211_rann_ie *rann)
675{
676 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
677 struct mesh_path *mpath;
678 u8 *ta;
679 u8 ttl, flags, hopcount;
680 u8 *orig_addr;
681 u32 orig_sn, metric;
682
683 ta = mgmt->sa;
684 ttl = rann->rann_ttl;
685 if (ttl <= 1) {
686 ifmsh->mshstats.dropped_frames_ttl++;
687 return;
688 }
689 ttl--;
690 flags = rann->rann_flags;
691 orig_addr = rann->rann_addr;
692 orig_sn = rann->rann_seq;
693 hopcount = rann->rann_hopcount;
694 hopcount++;
695 metric = rann->rann_metric;
696 mhwmp_dbg("received RANN from %pM\n", orig_addr);
697
698 rcu_read_lock();
699 mpath = mesh_path_lookup(orig_addr, sdata);
700 if (!mpath) {
701 mesh_path_add(orig_addr, sdata);
702 mpath = mesh_path_lookup(orig_addr, sdata);
703 if (!mpath) {
704 rcu_read_unlock();
705 sdata->u.mesh.mshstats.dropped_frames_no_route++;
706 return;
707 }
708 mesh_queue_preq(mpath,
709 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
710 }
711 if (mpath->sn < orig_sn) {
712 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
713 cpu_to_le32(orig_sn),
714 0, NULL, 0, broadcast_addr,
715 hopcount, ttl, 0,
716 cpu_to_le32(metric + mpath->metric),
717 0, sdata);
718 mpath->sn = orig_sn;
719 }
720 rcu_read_unlock();
721}
599 722
600 723
601void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 724void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
@@ -614,34 +737,34 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
614 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, 737 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
615 len - baselen, &elems); 738 len - baselen, &elems);
616 739
617 switch (mgmt->u.action.u.mesh_action.action_code) { 740 if (elems.preq) {
618 case MPATH_PREQ: 741 if (elems.preq_len != 37)
619 if (!elems.preq || elems.preq_len != 37)
620 /* Right now we support just 1 destination and no AE */ 742 /* Right now we support just 1 destination and no AE */
621 return; 743 return;
622 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq); 744 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
623 if (!last_hop_metric) 745 MPATH_PREQ);
624 return; 746 if (last_hop_metric)
625 hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric); 747 hwmp_preq_frame_process(sdata, mgmt, elems.preq,
626 break; 748 last_hop_metric);
627 case MPATH_PREP: 749 }
628 if (!elems.prep || elems.prep_len != 31) 750 if (elems.prep) {
751 if (elems.prep_len != 31)
629 /* Right now we support no AE */ 752 /* Right now we support no AE */
630 return; 753 return;
631 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep); 754 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
632 if (!last_hop_metric) 755 MPATH_PREP);
633 return; 756 if (last_hop_metric)
634 hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric); 757 hwmp_prep_frame_process(sdata, mgmt, elems.prep,
635 break; 758 last_hop_metric);
636 case MPATH_PERR: 759 }
637 if (!elems.perr || elems.perr_len != 12) 760 if (elems.perr) {
761 if (elems.perr_len != 15)
638 /* Right now we support only one destination per PERR */ 762 /* Right now we support only one destination per PERR */
639 return; 763 return;
640 hwmp_perr_frame_process(sdata, mgmt, elems.perr); 764 hwmp_perr_frame_process(sdata, mgmt, elems.perr);
641 default:
642 return;
643 } 765 }
644 766 if (elems.rann)
767 hwmp_rann_frame_process(sdata, mgmt, elems.rann);
645} 768}
646 769
647/** 770/**
@@ -661,7 +784,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
661 784
662 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); 785 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
663 if (!preq_node) { 786 if (!preq_node) {
664 printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n"); 787 mhwmp_dbg("could not allocate PREQ node\n");
665 return; 788 return;
666 } 789 }
667 790
@@ -670,7 +793,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
670 spin_unlock(&ifmsh->mesh_preq_queue_lock); 793 spin_unlock(&ifmsh->mesh_preq_queue_lock);
671 kfree(preq_node); 794 kfree(preq_node);
672 if (printk_ratelimit()) 795 if (printk_ratelimit())
673 printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); 796 mhwmp_dbg("PREQ node queue full\n");
674 return; 797 return;
675 } 798 }
676 799
@@ -705,7 +828,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
705 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 828 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
706 struct mesh_preq_queue *preq_node; 829 struct mesh_preq_queue *preq_node;
707 struct mesh_path *mpath; 830 struct mesh_path *mpath;
708 u8 ttl, dst_flags; 831 u8 ttl, target_flags;
709 u32 lifetime; 832 u32 lifetime;
710 833
711 spin_lock_bh(&ifmsh->mesh_preq_queue_lock); 834 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -747,11 +870,11 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
747 870
748 ifmsh->last_preq = jiffies; 871 ifmsh->last_preq = jiffies;
749 872
750 if (time_after(jiffies, ifmsh->last_dsn_update + 873 if (time_after(jiffies, ifmsh->last_sn_update +
751 net_traversal_jiffies(sdata)) || 874 net_traversal_jiffies(sdata)) ||
752 time_before(jiffies, ifmsh->last_dsn_update)) { 875 time_before(jiffies, ifmsh->last_sn_update)) {
753 ++ifmsh->dsn; 876 ++ifmsh->sn;
754 sdata->u.mesh.last_dsn_update = jiffies; 877 sdata->u.mesh.last_sn_update = jiffies;
755 } 878 }
756 lifetime = default_lifetime(sdata); 879 lifetime = default_lifetime(sdata);
757 ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; 880 ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
@@ -762,14 +885,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
762 } 885 }
763 886
764 if (preq_node->flags & PREQ_Q_F_REFRESH) 887 if (preq_node->flags & PREQ_Q_F_REFRESH)
765 dst_flags = MP_F_DO; 888 target_flags = MP_F_DO;
766 else 889 else
767 dst_flags = MP_F_RF; 890 target_flags = MP_F_RF;
768 891
769 spin_unlock_bh(&mpath->state_lock); 892 spin_unlock_bh(&mpath->state_lock);
770 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, 893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
771 cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst, 894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
772 cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0, 895 cpu_to_le32(mpath->sn), broadcast_addr, 0,
773 ttl, cpu_to_le32(lifetime), 0, 896 ttl, cpu_to_le32(lifetime), 0,
774 cpu_to_le32(ifmsh->preq_id++), sdata); 897 cpu_to_le32(ifmsh->preq_id++), sdata);
775 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 898 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
@@ -796,15 +919,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
796 struct sk_buff *skb_to_free = NULL; 919 struct sk_buff *skb_to_free = NULL;
797 struct mesh_path *mpath; 920 struct mesh_path *mpath;
798 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 921 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
799 u8 *dst_addr = hdr->addr3; 922 u8 *target_addr = hdr->addr3;
800 int err = 0; 923 int err = 0;
801 924
802 rcu_read_lock(); 925 rcu_read_lock();
803 mpath = mesh_path_lookup(dst_addr, sdata); 926 mpath = mesh_path_lookup(target_addr, sdata);
804 927
805 if (!mpath) { 928 if (!mpath) {
806 mesh_path_add(dst_addr, sdata); 929 mesh_path_add(target_addr, sdata);
807 mpath = mesh_path_lookup(dst_addr, sdata); 930 mpath = mesh_path_lookup(target_addr, sdata);
808 if (!mpath) { 931 if (!mpath) {
809 sdata->u.mesh.mshstats.dropped_frames_no_route++; 932 sdata->u.mesh.mshstats.dropped_frames_no_route++;
810 err = -ENOSPC; 933 err = -ENOSPC;
@@ -813,17 +936,16 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
813 } 936 }
814 937
815 if (mpath->flags & MESH_PATH_ACTIVE) { 938 if (mpath->flags & MESH_PATH_ACTIVE) {
816 if (time_after(jiffies, mpath->exp_time + 939 if (time_after(jiffies,
817 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) 940 mpath->exp_time +
818 && !memcmp(sdata->dev->dev_addr, hdr->addr4, 941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
819 ETH_ALEN) 942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
820 && !(mpath->flags & MESH_PATH_RESOLVING) 943 !(mpath->flags & MESH_PATH_RESOLVING) &&
821 && !(mpath->flags & MESH_PATH_FIXED)) { 944 !(mpath->flags & MESH_PATH_FIXED)) {
822 mesh_queue_preq(mpath, 945 mesh_queue_preq(mpath,
823 PREQ_Q_F_START | PREQ_Q_F_REFRESH); 946 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
824 } 947 }
825 memcpy(hdr->addr1, mpath->next_hop->sta.addr, 948 memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN);
826 ETH_ALEN);
827 } else { 949 } else {
828 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 950 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
829 if (!(mpath->flags & MESH_PATH_RESOLVING)) { 951 if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -882,3 +1004,14 @@ void mesh_path_timer(unsigned long data)
882endmpathtimer: 1004endmpathtimer:
883 rcu_read_unlock(); 1005 rcu_read_unlock();
884} 1006}
1007
1008void
1009mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1010{
1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1012
1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr,
1014 cpu_to_le32(++ifmsh->sn),
1015 0, NULL, 0, broadcast_addr,
1016 0, MESH_TTL, 0, 0, 0, sdata);
1017}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 751c4d0e2b36..a8da23905c70 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 open80211s Ltd. 2 * Copyright (c) 2008, 2009 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com> 3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -449,6 +449,7 @@ err_path_alloc:
449 */ 449 */
450void mesh_plink_broken(struct sta_info *sta) 450void mesh_plink_broken(struct sta_info *sta)
451{ 451{
452 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
452 struct mesh_path *mpath; 453 struct mesh_path *mpath;
453 struct mpath_node *node; 454 struct mpath_node *node;
454 struct hlist_node *p; 455 struct hlist_node *p;
@@ -463,11 +464,12 @@ void mesh_plink_broken(struct sta_info *sta)
463 mpath->flags & MESH_PATH_ACTIVE && 464 mpath->flags & MESH_PATH_ACTIVE &&
464 !(mpath->flags & MESH_PATH_FIXED)) { 465 !(mpath->flags & MESH_PATH_FIXED)) {
465 mpath->flags &= ~MESH_PATH_ACTIVE; 466 mpath->flags &= ~MESH_PATH_ACTIVE;
466 ++mpath->dsn; 467 ++mpath->sn;
467 spin_unlock_bh(&mpath->state_lock); 468 spin_unlock_bh(&mpath->state_lock);
468 mesh_path_error_tx(mpath->dst, 469 mesh_path_error_tx(MESH_TTL, mpath->dst,
469 cpu_to_le32(mpath->dsn), 470 cpu_to_le32(mpath->sn),
470 sdata->dev->broadcast, sdata); 471 cpu_to_le16(PERR_RCODE_DEST_UNREACH),
472 bcast, sdata);
471 } else 473 } else
472 spin_unlock_bh(&mpath->state_lock); 474 spin_unlock_bh(&mpath->state_lock);
473 } 475 }
@@ -601,7 +603,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
601{ 603{
602 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 604 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
603 struct mesh_path *mpath; 605 struct mesh_path *mpath;
604 u32 dsn = 0; 606 u32 sn = 0;
605 607
606 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { 608 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
607 u8 *ra, *da; 609 u8 *ra, *da;
@@ -610,8 +612,9 @@ void mesh_path_discard_frame(struct sk_buff *skb,
610 ra = hdr->addr1; 612 ra = hdr->addr1;
611 mpath = mesh_path_lookup(da, sdata); 613 mpath = mesh_path_lookup(da, sdata);
612 if (mpath) 614 if (mpath)
613 dsn = ++mpath->dsn; 615 sn = ++mpath->sn;
614 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata); 616 mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn),
617 cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
615 } 618 }
616 619
617 kfree_skb(skb); 620 kfree_skb(skb);
@@ -646,7 +649,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
646{ 649{
647 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
648 mesh_path_assign_nexthop(mpath, next_hop); 651 mesh_path_assign_nexthop(mpath, next_hop);
649 mpath->dsn = 0xffff; 652 mpath->sn = 0xffff;
650 mpath->metric = 0; 653 mpath->metric = 0;
651 mpath->hop_count = 0; 654 mpath->hop_count = 0;
652 mpath->exp_time = 0; 655 mpath->exp_time = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index ffcbad75e09b..0f7c6e6a4248 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 open80211s Ltd. 2 * Copyright (c) 2008, 2009 open80211s Ltd.
3 * Author: Luis Carlos Cobo <luisca@cozybit.com> 3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -18,9 +18,8 @@
18#define mpl_dbg(fmt, args...) do { (void)(0); } while (0) 18#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
19#endif 19#endif
20 20
21#define PLINK_GET_FRAME_SUBTYPE(p) (p) 21#define PLINK_GET_LLID(p) (p + 4)
22#define PLINK_GET_LLID(p) (p + 1) 22#define PLINK_GET_PLID(p) (p + 6)
23#define PLINK_GET_PLID(p) (p + 3)
24 23
25#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ 24#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
26 jiffies + HZ * t / 1000)) 25 jiffies + HZ * t / 1000))
@@ -65,6 +64,7 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 64{
66 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); 65 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
67 mesh_accept_plinks_update(sdata); 66 mesh_accept_plinks_update(sdata);
67 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
68} 68}
69 69
70static inline 70static inline
@@ -72,12 +72,13 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
72{ 72{
73 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); 73 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
74 mesh_accept_plinks_update(sdata); 74 mesh_accept_plinks_update(sdata);
75 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
75} 76}
76 77
77/** 78/**
78 * mesh_plink_fsm_restart - restart a mesh peer link finite state machine 79 * mesh_plink_fsm_restart - restart a mesh peer link finite state machine
79 * 80 *
80 * @sta: mes peer link to restart 81 * @sta: mesh peer link to restart
81 * 82 *
82 * Locking: this function must be called holding sta->lock 83 * Locking: this function must be called holding sta->lock
83 */ 84 */
@@ -152,6 +153,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
152 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 153 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
153 struct ieee80211_mgmt *mgmt; 154 struct ieee80211_mgmt *mgmt;
154 bool include_plid = false; 155 bool include_plid = false;
156 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
155 u8 *pos; 157 u8 *pos;
156 int ie_len; 158 int ie_len;
157 159
@@ -169,7 +171,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
169 memcpy(mgmt->da, da, ETH_ALEN); 171 memcpy(mgmt->da, da, ETH_ALEN);
170 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 172 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
171 /* BSSID is left zeroed, wildcard value */ 173 /* BSSID is left zeroed, wildcard value */
172 mgmt->u.action.category = PLINK_CATEGORY; 174 mgmt->u.action.category = MESH_PLINK_CATEGORY;
173 mgmt->u.action.u.plink_action.action_code = action; 175 mgmt->u.action.u.plink_action.action_code = action;
174 176
175 if (action == PLINK_CLOSE) 177 if (action == PLINK_CLOSE)
@@ -179,7 +181,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
179 if (action == PLINK_CONFIRM) { 181 if (action == PLINK_CONFIRM) {
180 pos = skb_put(skb, 4); 182 pos = skb_put(skb, 4);
181 /* two-byte status code followed by two-byte AID */ 183 /* two-byte status code followed by two-byte AID */
182 memset(pos, 0, 4); 184 memset(pos, 0, 2);
185 memcpy(pos + 2, &plid, 2);
183 } 186 }
184 mesh_mgmt_ies_add(skb, sdata); 187 mesh_mgmt_ies_add(skb, sdata);
185 } 188 }
@@ -187,18 +190,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
187 /* Add Peer Link Management element */ 190 /* Add Peer Link Management element */
188 switch (action) { 191 switch (action) {
189 case PLINK_OPEN: 192 case PLINK_OPEN:
190 ie_len = 3; 193 ie_len = 6;
191 break; 194 break;
192 case PLINK_CONFIRM: 195 case PLINK_CONFIRM:
193 ie_len = 5; 196 ie_len = 8;
194 include_plid = true; 197 include_plid = true;
195 break; 198 break;
196 case PLINK_CLOSE: 199 case PLINK_CLOSE:
197 default: 200 default:
198 if (!plid) 201 if (!plid)
199 ie_len = 5; 202 ie_len = 8;
200 else { 203 else {
201 ie_len = 7; 204 ie_len = 10;
202 include_plid = true; 205 include_plid = true;
203 } 206 }
204 break; 207 break;
@@ -207,7 +210,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
207 pos = skb_put(skb, 2 + ie_len); 210 pos = skb_put(skb, 2 + ie_len);
208 *pos++ = WLAN_EID_PEER_LINK; 211 *pos++ = WLAN_EID_PEER_LINK;
209 *pos++ = ie_len; 212 *pos++ = ie_len;
210 *pos++ = action; 213 memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto));
214 pos += 4;
211 memcpy(pos, &llid, 2); 215 memcpy(pos, &llid, 2);
212 if (include_plid) { 216 if (include_plid) {
213 pos += 2; 217 pos += 2;
@@ -218,7 +222,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
218 memcpy(pos, &reason, 2); 222 memcpy(pos, &reason, 2);
219 } 223 }
220 224
221 ieee80211_tx_skb(sdata, skb, 1); 225 ieee80211_tx_skb(sdata, skb);
222 return 0; 226 return 0;
223} 227}
224 228
@@ -395,6 +399,17 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
395 u8 ie_len; 399 u8 ie_len;
396 u8 *baseaddr; 400 u8 *baseaddr;
397 __le16 plid, llid, reason; 401 __le16 plid, llid, reason;
402#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
403 static const char *mplstates[] = {
404 [PLINK_LISTEN] = "LISTEN",
405 [PLINK_OPN_SNT] = "OPN-SNT",
406 [PLINK_OPN_RCVD] = "OPN-RCVD",
407 [PLINK_CNF_RCVD] = "CNF_RCVD",
408 [PLINK_ESTAB] = "ESTAB",
409 [PLINK_HOLDING] = "HOLDING",
410 [PLINK_BLOCKED] = "BLOCKED"
411 };
412#endif
398 413
399 /* need action_code, aux */ 414 /* need action_code, aux */
400 if (len < IEEE80211_MIN_ACTION_SIZE + 3) 415 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
@@ -417,12 +432,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
417 return; 432 return;
418 } 433 }
419 434
420 ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link)); 435 ftype = mgmt->u.action.u.plink_action.action_code;
421 ie_len = elems.peer_link_len; 436 ie_len = elems.peer_link_len;
422 if ((ftype == PLINK_OPEN && ie_len != 3) || 437 if ((ftype == PLINK_OPEN && ie_len != 6) ||
423 (ftype == PLINK_CONFIRM && ie_len != 5) || 438 (ftype == PLINK_CONFIRM && ie_len != 8) ||
424 (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) { 439 (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
425 mpl_dbg("Mesh plink: incorrect plink ie length\n"); 440 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
441 ftype, ie_len);
426 return; 442 return;
427 } 443 }
428 444
@@ -434,7 +450,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
434 * from the point of view of this host. 450 * from the point of view of this host.
435 */ 451 */
436 memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); 452 memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
437 if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7)) 453 if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
438 memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); 454 memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
439 455
440 rcu_read_lock(); 456 rcu_read_lock();
@@ -532,8 +548,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
532 } 548 }
533 } 549 }
534 550
535 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %d %d %d %d\n", 551 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
536 mgmt->sa, sta->plink_state, 552 mgmt->sa, mplstates[sta->plink_state],
537 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), 553 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
538 event); 554 event);
539 reason = 0; 555 reason = 0;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 71220a5d1406..6dc7b5ad9a41 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -426,7 +426,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
427 } 427 }
428 428
429 ieee80211_tx_skb(sdata, skb, 0); 429 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
430 ieee80211_tx_skb(sdata, skb);
430} 431}
431 432
432 433
@@ -467,7 +468,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
467 __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); 468 __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
468 else 469 else
469 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); 470 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
470 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); 471 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
472 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
473 ieee80211_tx_skb(sdata, skb);
471} 474}
472 475
473void ieee80211_send_pspoll(struct ieee80211_local *local, 476void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -498,7 +501,8 @@ void ieee80211_send_pspoll(struct ieee80211_local *local,
498 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN); 501 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
499 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN); 502 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
500 503
501 ieee80211_tx_skb(sdata, skb, 0); 504 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
505 ieee80211_tx_skb(sdata, skb);
502} 506}
503 507
504void ieee80211_send_nullfunc(struct ieee80211_local *local, 508void ieee80211_send_nullfunc(struct ieee80211_local *local,
@@ -531,7 +535,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
531 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 535 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
532 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); 536 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
533 537
534 ieee80211_tx_skb(sdata, skb, 0); 538 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
539 ieee80211_tx_skb(sdata, skb);
535} 540}
536 541
537/* spectrum management related things */ 542/* spectrum management related things */
@@ -1463,8 +1468,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1463 if (status_code != WLAN_STATUS_SUCCESS) { 1468 if (status_code != WLAN_STATUS_SUCCESS) {
1464 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 1469 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1465 sdata->dev->name, status_code); 1470 sdata->dev->name, status_code);
1466 list_del(&wk->list); 1471 wk->state = IEEE80211_MGD_STATE_IDLE;
1467 kfree(wk);
1468 return RX_MGMT_CFG80211_ASSOC; 1472 return RX_MGMT_CFG80211_ASSOC;
1469 } 1473 }
1470 1474
@@ -1899,7 +1903,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1899 fc = le16_to_cpu(mgmt->frame_control); 1903 fc = le16_to_cpu(mgmt->frame_control);
1900 1904
1901 switch (fc & IEEE80211_FCTL_STYPE) { 1905 switch (fc & IEEE80211_FCTL_STYPE) {
1902 case IEEE80211_STYPE_PROBE_REQ:
1903 case IEEE80211_STYPE_PROBE_RESP: 1906 case IEEE80211_STYPE_PROBE_RESP:
1904 case IEEE80211_STYPE_BEACON: 1907 case IEEE80211_STYPE_BEACON:
1905 case IEEE80211_STYPE_AUTH: 1908 case IEEE80211_STYPE_AUTH:
@@ -2505,6 +2508,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2505 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2508 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2506 struct ieee80211_mgd_work *wk; 2509 struct ieee80211_mgd_work *wk;
2507 const u8 *bssid = NULL; 2510 const u8 *bssid = NULL;
2511 bool not_auth_yet = false;
2508 2512
2509 mutex_lock(&ifmgd->mtx); 2513 mutex_lock(&ifmgd->mtx);
2510 2514
@@ -2514,6 +2518,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2514 } else list_for_each_entry(wk, &ifmgd->work_list, list) { 2518 } else list_for_each_entry(wk, &ifmgd->work_list, list) {
2515 if (&wk->bss->cbss == req->bss) { 2519 if (&wk->bss->cbss == req->bss) {
2516 bssid = req->bss->bssid; 2520 bssid = req->bss->bssid;
2521 if (wk->state == IEEE80211_MGD_STATE_PROBE)
2522 not_auth_yet = true;
2517 list_del(&wk->list); 2523 list_del(&wk->list);
2518 kfree(wk); 2524 kfree(wk);
2519 break; 2525 break;
@@ -2521,6 +2527,20 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2521 } 2527 }
2522 2528
2523 /* 2529 /*
2530 * If somebody requests authentication and we haven't
2531 * sent out an auth frame yet there's no need to send
2532 * out a deauth frame either. If the state was PROBE,
2533 * then this is the case. If it's AUTH we have sent a
2534 * frame, and if it's IDLE we have completed the auth
2535 * process already.
2536 */
2537 if (not_auth_yet) {
2538 mutex_unlock(&ifmgd->mtx);
2539 __cfg80211_auth_canceled(sdata->dev, bssid);
2540 return 0;
2541 }
2542
2543 /*
2524 * cfg80211 should catch this ... but it's racy since 2544 * cfg80211 should catch this ... but it's racy since
2525 * we can receive a deauth frame, process it, hand it 2545 * we can receive a deauth frame, process it, hand it
2526 * to cfg80211 while that's in a locked section already 2546 * to cfg80211 while that's in a locked section already
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b33efc4fc267..b9007f80cb92 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -163,8 +163,7 @@ struct rate_control_ref *rate_control_alloc(const char *name,
163#ifdef CONFIG_MAC80211_DEBUGFS 163#ifdef CONFIG_MAC80211_DEBUGFS
164 debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); 164 debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
165 local->debugfs.rcdir = debugfsdir; 165 local->debugfs.rcdir = debugfsdir;
166 local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir, 166 debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops);
167 ref, &rcname_ops);
168#endif 167#endif
169 168
170 ref->priv = ref->ops->alloc(&local->hw, debugfsdir); 169 ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
@@ -188,9 +187,7 @@ static void rate_control_release(struct kref *kref)
188 ctrl_ref->ops->free(ctrl_ref->priv); 187 ctrl_ref->ops->free(ctrl_ref->priv);
189 188
190#ifdef CONFIG_MAC80211_DEBUGFS 189#ifdef CONFIG_MAC80211_DEBUGFS
191 debugfs_remove(ctrl_ref->local->debugfs.rcname); 190 debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir);
192 ctrl_ref->local->debugfs.rcname = NULL;
193 debugfs_remove(ctrl_ref->local->debugfs.rcdir);
194 ctrl_ref->local->debugfs.rcdir = NULL; 191 ctrl_ref->local->debugfs.rcdir = NULL;
195#endif 192#endif
196 193
@@ -287,9 +284,16 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
287 struct rate_control_ref *ref, *old; 284 struct rate_control_ref *ref, *old;
288 285
289 ASSERT_RTNL(); 286 ASSERT_RTNL();
287
290 if (local->open_count) 288 if (local->open_count)
291 return -EBUSY; 289 return -EBUSY;
292 290
291 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
292 if (WARN_ON(!local->ops->set_rts_threshold))
293 return -EINVAL;
294 return 0;
295 }
296
293 ref = rate_control_alloc(name, local); 297 ref = rate_control_alloc(name, local);
294 if (!ref) { 298 if (!ref) {
295 printk(KERN_WARNING "%s: Failed to select rate control " 299 printk(KERN_WARNING "%s: Failed to select rate control "
@@ -308,7 +312,6 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
308 "algorithm '%s'\n", wiphy_name(local->hw.wiphy), 312 "algorithm '%s'\n", wiphy_name(local->hw.wiphy),
309 ref->ops->name); 313 ref->ops->name);
310 314
311
312 return 0; 315 return 0;
313} 316}
314 317
@@ -317,6 +320,10 @@ void rate_control_deinitialize(struct ieee80211_local *local)
317 struct rate_control_ref *ref; 320 struct rate_control_ref *ref;
318 321
319 ref = local->rate_ctrl; 322 ref = local->rate_ctrl;
323
324 if (!ref)
325 return;
326
320 local->rate_ctrl = NULL; 327 local->rate_ctrl = NULL;
321 rate_control_put(ref); 328 rate_control_put(ref);
322} 329}
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 2ab5ad9e71ce..cb9bd1f65e27 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -59,6 +59,9 @@ static inline void rate_control_rate_init(struct sta_info *sta)
59 void *priv_sta = sta->rate_ctrl_priv; 59 void *priv_sta = sta->rate_ctrl_priv;
60 struct ieee80211_supported_band *sband; 60 struct ieee80211_supported_band *sband;
61 61
62 if (!ref)
63 return;
64
62 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 65 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
63 66
64 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 67 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
@@ -72,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
72 struct ieee80211_sta *ista = &sta->sta; 75 struct ieee80211_sta *ista = &sta->sta;
73 void *priv_sta = sta->rate_ctrl_priv; 76 void *priv_sta = sta->rate_ctrl_priv;
74 77
75 if (ref->ops->rate_update) 78 if (ref && ref->ops->rate_update)
76 ref->ops->rate_update(ref->priv, sband, ista, 79 ref->ops->rate_update(ref->priv, sband, ista,
77 priv_sta, changed); 80 priv_sta, changed);
78} 81}
@@ -97,7 +100,7 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
97{ 100{
98#ifdef CONFIG_MAC80211_DEBUGFS 101#ifdef CONFIG_MAC80211_DEBUGFS
99 struct rate_control_ref *ref = sta->rate_ctrl; 102 struct rate_control_ref *ref = sta->rate_ctrl;
100 if (sta->debugfs.dir && ref->ops->add_sta_debugfs) 103 if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs)
101 ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, 104 ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv,
102 sta->debugfs.dir); 105 sta->debugfs.dir);
103#endif 106#endif
@@ -107,7 +110,7 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
107{ 110{
108#ifdef CONFIG_MAC80211_DEBUGFS 111#ifdef CONFIG_MAC80211_DEBUGFS
109 struct rate_control_ref *ref = sta->rate_ctrl; 112 struct rate_control_ref *ref = sta->rate_ctrl;
110 if (ref->ops->remove_sta_debugfs) 113 if (ref && ref->ops->remove_sta_debugfs)
111 ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv); 114 ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv);
112#endif 115#endif
113} 116}
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index a59043fbb0ff..45667054a5f3 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -6,6 +6,7 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/sched.h>
9#include <linux/spinlock.h> 10#include <linux/spinlock.h>
10#include <linux/poll.h> 11#include <linux/poll.h>
11#include <linux/netdevice.h> 12#include <linux/netdevice.h>
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5c385e3c1d1f..beecf50fbd10 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,11 +27,10 @@
27#include "tkip.h" 27#include "tkip.h"
28#include "wme.h" 28#include "wme.h"
29 29
30static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 30static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx, 31 struct tid_ampdu_rx *tid_agg_rx,
32 struct sk_buff *skb, 32 u16 head_seq_num);
33 u16 mpdu_seq_num, 33
34 int bar_req);
35/* 34/*
36 * monitor mode reception 35 * monitor mode reception
37 * 36 *
@@ -39,11 +38,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
39 * only useful for monitoring. 38 * only useful for monitoring.
40 */ 39 */
41static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 40static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
42 struct sk_buff *skb, 41 struct sk_buff *skb)
43 int rtap_len)
44{ 42{
45 skb_pull(skb, rtap_len);
46
47 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
48 if (likely(skb->len > FCS_LEN)) 44 if (likely(skb->len > FCS_LEN))
49 skb_trim(skb, skb->len - FCS_LEN); 45 skb_trim(skb, skb->len - FCS_LEN);
@@ -59,15 +55,14 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
59} 55}
60 56
61static inline int should_drop_frame(struct sk_buff *skb, 57static inline int should_drop_frame(struct sk_buff *skb,
62 int present_fcs_len, 58 int present_fcs_len)
63 int radiotap_len)
64{ 59{
65 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
66 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
67 62
68 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
69 return 1; 64 return 1;
70 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) 65 if (unlikely(skb->len < 16 + present_fcs_len))
71 return 1; 66 return 1;
72 if (ieee80211_is_ctl(hdr->frame_control) && 67 if (ieee80211_is_ctl(hdr->frame_control) &&
73 !ieee80211_is_pspoll(hdr->frame_control) && 68 !ieee80211_is_pspoll(hdr->frame_control) &&
@@ -95,10 +90,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
95 if (len & 1) /* padding for RX_FLAGS if necessary */ 90 if (len & 1) /* padding for RX_FLAGS if necessary */
96 len++; 91 len++;
97 92
98 /* make sure radiotap starts at a naturally aligned address */
99 if (len % 8)
100 len = roundup(len, 8);
101
102 return len; 93 return len;
103} 94}
104 95
@@ -116,6 +107,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
116 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 107 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
117 struct ieee80211_radiotap_header *rthdr; 108 struct ieee80211_radiotap_header *rthdr;
118 unsigned char *pos; 109 unsigned char *pos;
110 u16 rx_flags = 0;
119 111
120 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 112 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
121 memset(rthdr, 0, rtap_len); 113 memset(rthdr, 0, rtap_len);
@@ -134,7 +126,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
134 126
135 /* IEEE80211_RADIOTAP_TSFT */ 127 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) { 128 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime); 129 put_unaligned_le64(status->mactime, pos);
138 rthdr->it_present |= 130 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 131 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8; 132 pos += 8;
@@ -166,17 +158,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
166 pos++; 158 pos++;
167 159
168 /* IEEE80211_RADIOTAP_CHANNEL */ 160 /* IEEE80211_RADIOTAP_CHANNEL */
169 *(__le16 *)pos = cpu_to_le16(status->freq); 161 put_unaligned_le16(status->freq, pos);
170 pos += 2; 162 pos += 2;
171 if (status->band == IEEE80211_BAND_5GHZ) 163 if (status->band == IEEE80211_BAND_5GHZ)
172 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | 164 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
173 IEEE80211_CHAN_5GHZ); 165 pos);
166 else if (status->flag & RX_FLAG_HT)
167 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
168 pos);
174 else if (rate->flags & IEEE80211_RATE_ERP_G) 169 else if (rate->flags & IEEE80211_RATE_ERP_G)
175 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | 170 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
176 IEEE80211_CHAN_2GHZ); 171 pos);
177 else 172 else
178 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK | 173 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
179 IEEE80211_CHAN_2GHZ); 174 pos);
180 pos += 2; 175 pos += 2;
181 176
182 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 177 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -205,10 +200,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
205 200
206 /* IEEE80211_RADIOTAP_RX_FLAGS */ 201 /* IEEE80211_RADIOTAP_RX_FLAGS */
207 /* ensure 2 byte alignment for the 2 byte field as required */ 202 /* ensure 2 byte alignment for the 2 byte field as required */
208 if ((pos - (unsigned char *)rthdr) & 1) 203 if ((pos - (u8 *)rthdr) & 1)
209 pos++; 204 pos++;
210 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 205 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
211 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP); 206 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
207 put_unaligned_le16(rx_flags, pos);
212 pos += 2; 208 pos += 2;
213} 209}
214 210
@@ -227,7 +223,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
227 struct sk_buff *skb, *skb2; 223 struct sk_buff *skb, *skb2;
228 struct net_device *prev_dev = NULL; 224 struct net_device *prev_dev = NULL;
229 int present_fcs_len = 0; 225 int present_fcs_len = 0;
230 int rtap_len = 0;
231 226
232 /* 227 /*
233 * First, we may need to make a copy of the skb because 228 * First, we may need to make a copy of the skb because
@@ -237,25 +232,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
237 * We don't need to, of course, if we aren't going to return 232 * We don't need to, of course, if we aren't going to return
238 * the SKB because it has a bad FCS/PLCP checksum. 233 * the SKB because it has a bad FCS/PLCP checksum.
239 */ 234 */
240 if (status->flag & RX_FLAG_RADIOTAP) 235
241 rtap_len = ieee80211_get_radiotap_len(origskb->data); 236 /* room for the radiotap header based on driver features */
242 else 237 needed_headroom = ieee80211_rx_radiotap_len(local, status);
243 /* room for the radiotap header based on driver features */
244 needed_headroom = ieee80211_rx_radiotap_len(local, status);
245 238
246 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 239 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
247 present_fcs_len = FCS_LEN; 240 present_fcs_len = FCS_LEN;
248 241
249 if (!local->monitors) { 242 if (!local->monitors) {
250 if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { 243 if (should_drop_frame(origskb, present_fcs_len)) {
251 dev_kfree_skb(origskb); 244 dev_kfree_skb(origskb);
252 return NULL; 245 return NULL;
253 } 246 }
254 247
255 return remove_monitor_info(local, origskb, rtap_len); 248 return remove_monitor_info(local, origskb);
256 } 249 }
257 250
258 if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { 251 if (should_drop_frame(origskb, present_fcs_len)) {
259 /* only need to expand headroom if necessary */ 252 /* only need to expand headroom if necessary */
260 skb = origskb; 253 skb = origskb;
261 origskb = NULL; 254 origskb = NULL;
@@ -279,16 +272,14 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
279 */ 272 */
280 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 273 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
281 274
282 origskb = remove_monitor_info(local, origskb, rtap_len); 275 origskb = remove_monitor_info(local, origskb);
283 276
284 if (!skb) 277 if (!skb)
285 return origskb; 278 return origskb;
286 } 279 }
287 280
288 /* if necessary, prepend radiotap information */ 281 /* prepend radiotap information */
289 if (!(status->flag & RX_FLAG_RADIOTAP)) 282 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
290 ieee80211_add_rx_radiotap_header(local, skb, rate,
291 needed_headroom);
292 283
293 skb_reset_mac_header(skb); 284 skb_reset_mac_header(skb);
294 skb->ip_summed = CHECKSUM_UNNECESSARY; 285 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -489,7 +480,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
489{ 480{
490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 481 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
491 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 482 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
492 char *dev_addr = rx->dev->dev_addr; 483 char *dev_addr = rx->sdata->dev->dev_addr;
493 484
494 if (ieee80211_is_data(hdr->frame_control)) { 485 if (ieee80211_is_data(hdr->frame_control)) {
495 if (is_multicast_ether_addr(hdr->addr1)) { 486 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -518,7 +509,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
518 509
519 if (ieee80211_is_action(hdr->frame_control)) { 510 if (ieee80211_is_action(hdr->frame_control)) {
520 mgmt = (struct ieee80211_mgmt *)hdr; 511 mgmt = (struct ieee80211_mgmt *)hdr;
521 if (mgmt->u.action.category != PLINK_CATEGORY) 512 if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
522 return RX_DROP_MONITOR; 513 return RX_DROP_MONITOR;
523 return RX_CONTINUE; 514 return RX_CONTINUE;
524 } 515 }
@@ -603,7 +594,9 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
603static ieee80211_rx_result debug_noinline 594static ieee80211_rx_result debug_noinline
604ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 595ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
605{ 596{
606 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 597 struct sk_buff *skb = rx->skb;
598 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
607 int keyidx; 600 int keyidx;
608 int hdrlen; 601 int hdrlen;
609 ieee80211_rx_result result = RX_DROP_UNUSABLE; 602 ieee80211_rx_result result = RX_DROP_UNUSABLE;
@@ -657,8 +650,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
657 return RX_CONTINUE; 650 return RX_CONTINUE;
658 } else if (mmie_keyidx >= 0) { 651 } else if (mmie_keyidx >= 0) {
659 /* Broadcast/multicast robust management frame / BIP */ 652 /* Broadcast/multicast robust management frame / BIP */
660 if ((rx->status->flag & RX_FLAG_DECRYPTED) && 653 if ((status->flag & RX_FLAG_DECRYPTED) &&
661 (rx->status->flag & RX_FLAG_IV_STRIPPED)) 654 (status->flag & RX_FLAG_IV_STRIPPED))
662 return RX_CONTINUE; 655 return RX_CONTINUE;
663 656
664 if (mmie_keyidx < NUM_DEFAULT_KEYS || 657 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
@@ -690,8 +683,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
690 * we somehow allow the driver to tell us which key 683 * we somehow allow the driver to tell us which key
691 * the hardware used if this flag is set? 684 * the hardware used if this flag is set?
692 */ 685 */
693 if ((rx->status->flag & RX_FLAG_DECRYPTED) && 686 if ((status->flag & RX_FLAG_DECRYPTED) &&
694 (rx->status->flag & RX_FLAG_IV_STRIPPED)) 687 (status->flag & RX_FLAG_IV_STRIPPED))
695 return RX_CONTINUE; 688 return RX_CONTINUE;
696 689
697 hdrlen = ieee80211_hdrlen(hdr->frame_control); 690 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -727,8 +720,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
727 /* Check for weak IVs if possible */ 720 /* Check for weak IVs if possible */
728 if (rx->sta && rx->key->conf.alg == ALG_WEP && 721 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
729 ieee80211_is_data(hdr->frame_control) && 722 ieee80211_is_data(hdr->frame_control) &&
730 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) || 723 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
731 !(rx->status->flag & RX_FLAG_DECRYPTED)) && 724 !(status->flag & RX_FLAG_DECRYPTED)) &&
732 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 725 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
733 rx->sta->wep_weak_iv_count++; 726 rx->sta->wep_weak_iv_count++;
734 727
@@ -748,7 +741,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
748 } 741 }
749 742
750 /* either the frame has been decrypted or will be dropped */ 743 /* either the frame has been decrypted or will be dropped */
751 rx->status->flag |= RX_FLAG_DECRYPTED; 744 status->flag |= RX_FLAG_DECRYPTED;
752 745
753 return result; 746 return result;
754} 747}
@@ -792,7 +785,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
792 struct ieee80211_local *local = sdata->local; 785 struct ieee80211_local *local = sdata->local;
793 786
794 atomic_inc(&sdata->bss->num_sta_ps); 787 atomic_inc(&sdata->bss->num_sta_ps);
795 set_sta_flags(sta, WLAN_STA_PS); 788 set_sta_flags(sta, WLAN_STA_PS_STA);
796 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); 789 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
797#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 790#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
798 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 791 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
@@ -800,45 +793,37 @@ static void ap_sta_ps_start(struct sta_info *sta)
800#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 793#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
801} 794}
802 795
803static int ap_sta_ps_end(struct sta_info *sta) 796static void ap_sta_ps_end(struct sta_info *sta)
804{ 797{
805 struct ieee80211_sub_if_data *sdata = sta->sdata; 798 struct ieee80211_sub_if_data *sdata = sta->sdata;
806 struct ieee80211_local *local = sdata->local;
807 int sent, buffered;
808 799
809 atomic_dec(&sdata->bss->num_sta_ps); 800 atomic_dec(&sdata->bss->num_sta_ps);
810 801
811 clear_sta_flags(sta, WLAN_STA_PS); 802 clear_sta_flags(sta, WLAN_STA_PS_STA);
812 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
813
814 if (!skb_queue_empty(&sta->ps_tx_buf))
815 sta_info_clear_tim_bit(sta);
816 803
817#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 804#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
818 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 805 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
819 sdata->dev->name, sta->sta.addr, sta->sta.aid); 806 sdata->dev->name, sta->sta.addr, sta->sta.aid);
820#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 807#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
821 808
822 /* Send all buffered frames to the station */ 809 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
823 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
824 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
825 sent += buffered;
826 local->total_ps_buffered -= buffered;
827
828#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 810#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
829 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 811 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
830 "since STA not sleeping anymore\n", sdata->dev->name, 812 sdata->dev->name, sta->sta.addr, sta->sta.aid);
831 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
832#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 813#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
814 return;
815 }
833 816
834 return sent; 817 ieee80211_sta_ps_deliver_wakeup(sta);
835} 818}
836 819
837static ieee80211_rx_result debug_noinline 820static ieee80211_rx_result debug_noinline
838ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 821ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
839{ 822{
840 struct sta_info *sta = rx->sta; 823 struct sta_info *sta = rx->sta;
841 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 824 struct sk_buff *skb = rx->skb;
825 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
826 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
842 827
843 if (!sta) 828 if (!sta)
844 return RX_CONTINUE; 829 return RX_CONTINUE;
@@ -869,9 +854,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
869 854
870 sta->rx_fragments++; 855 sta->rx_fragments++;
871 sta->rx_bytes += rx->skb->len; 856 sta->rx_bytes += rx->skb->len;
872 sta->last_signal = rx->status->signal; 857 sta->last_signal = status->signal;
873 sta->last_qual = rx->status->qual; 858 sta->last_noise = status->noise;
874 sta->last_noise = rx->status->noise;
875 859
876 /* 860 /*
877 * Change STA power saving mode only at the end of a frame 861 * Change STA power saving mode only at the end of a frame
@@ -880,7 +864,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
880 if (!ieee80211_has_morefrags(hdr->frame_control) && 864 if (!ieee80211_has_morefrags(hdr->frame_control) &&
881 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 865 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
882 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 866 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
883 if (test_sta_flags(sta, WLAN_STA_PS)) { 867 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
884 /* 868 /*
885 * Ignore doze->wake transitions that are 869 * Ignore doze->wake transitions that are
886 * indicated by non-data frames, the standard 870 * indicated by non-data frames, the standard
@@ -891,19 +875,24 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
891 */ 875 */
892 if (ieee80211_is_data(hdr->frame_control) && 876 if (ieee80211_is_data(hdr->frame_control) &&
893 !ieee80211_has_pm(hdr->frame_control)) 877 !ieee80211_has_pm(hdr->frame_control))
894 rx->sent_ps_buffered += ap_sta_ps_end(sta); 878 ap_sta_ps_end(sta);
895 } else { 879 } else {
896 if (ieee80211_has_pm(hdr->frame_control)) 880 if (ieee80211_has_pm(hdr->frame_control))
897 ap_sta_ps_start(sta); 881 ap_sta_ps_start(sta);
898 } 882 }
899 } 883 }
900 884
901 /* Drop data::nullfunc frames silently, since they are used only to 885 /*
902 * control station power saving mode. */ 886 * Drop (qos-)data::nullfunc frames silently, since they
903 if (ieee80211_is_nullfunc(hdr->frame_control)) { 887 * are used only to control station power saving mode.
888 */
889 if (ieee80211_is_nullfunc(hdr->frame_control) ||
890 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
904 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 891 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
905 /* Update counter and free packet here to avoid counting this 892 /*
906 * as a dropped packed. */ 893 * Update counter and free packet here to avoid
894 * counting this as a dropped packed.
895 */
907 sta->rx_packets++; 896 sta->rx_packets++;
908 dev_kfree_skb(rx->skb); 897 dev_kfree_skb(rx->skb);
909 return RX_QUEUED; 898 return RX_QUEUED;
@@ -1103,9 +1092,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1103static ieee80211_rx_result debug_noinline 1092static ieee80211_rx_result debug_noinline
1104ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) 1093ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1105{ 1094{
1106 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1095 struct ieee80211_sub_if_data *sdata = rx->sdata;
1107 struct sk_buff *skb;
1108 int no_pending_pkts;
1109 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; 1096 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1110 1097
1111 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || 1098 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
@@ -1116,56 +1103,10 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1116 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) 1103 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1117 return RX_DROP_UNUSABLE; 1104 return RX_DROP_UNUSABLE;
1118 1105
1119 skb = skb_dequeue(&rx->sta->tx_filtered); 1106 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1120 if (!skb) { 1107 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1121 skb = skb_dequeue(&rx->sta->ps_tx_buf); 1108 else
1122 if (skb) 1109 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1123 rx->local->total_ps_buffered--;
1124 }
1125 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1126 skb_queue_empty(&rx->sta->ps_tx_buf);
1127
1128 if (skb) {
1129 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1130 struct ieee80211_hdr *hdr =
1131 (struct ieee80211_hdr *) skb->data;
1132
1133 /*
1134 * Tell TX path to send this frame even though the STA may
1135 * still remain is PS mode after this frame exchange.
1136 */
1137 info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
1138
1139#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1140 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1141 rx->sta->sta.addr, rx->sta->sta.aid,
1142 skb_queue_len(&rx->sta->ps_tx_buf));
1143#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1144
1145 /* Use MoreData flag to indicate whether there are more
1146 * buffered frames for this STA */
1147 if (no_pending_pkts)
1148 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1149 else
1150 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1151
1152 ieee80211_add_pending_skb(rx->local, skb);
1153
1154 if (no_pending_pkts)
1155 sta_info_clear_tim_bit(rx->sta);
1156#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1157 } else if (!rx->sent_ps_buffered) {
1158 /*
1159 * FIXME: This can be the result of a race condition between
1160 * us expiring a frame and the station polling for it.
1161 * Should we send it a null-func frame indicating we
1162 * have nothing buffered for it?
1163 */
1164 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1165 "though there are no buffered frames for it\n",
1166 rx->dev->name, rx->sta->sta.addr);
1167#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1168 }
1169 1110
1170 /* Free PS Poll skb here instead of returning RX_DROP that would 1111 /* Free PS Poll skb here instead of returning RX_DROP that would
1171 * count as an dropped frame. */ 1112 * count as an dropped frame. */
@@ -1206,11 +1147,14 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1206static int 1147static int
1207ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1148ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1208{ 1149{
1150 struct sk_buff *skb = rx->skb;
1151 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1152
1209 /* 1153 /*
1210 * Pass through unencrypted frames if the hardware has 1154 * Pass through unencrypted frames if the hardware has
1211 * decrypted them already. 1155 * decrypted them already.
1212 */ 1156 */
1213 if (rx->status->flag & RX_FLAG_DECRYPTED) 1157 if (status->flag & RX_FLAG_DECRYPTED)
1214 return 0; 1158 return 0;
1215 1159
1216 /* Drop unencrypted frames if key is set. */ 1160 /* Drop unencrypted frames if key is set. */
@@ -1224,8 +1168,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1224 rx->key)) 1168 rx->key))
1225 return -EACCES; 1169 return -EACCES;
1226 /* BIP does not use Protected field, so need to check MMIE */ 1170 /* BIP does not use Protected field, so need to check MMIE */
1227 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) 1171 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1228 && ieee80211_get_mmie_keyidx(rx->skb) < 0 && 1172 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1229 rx->key)) 1173 rx->key))
1230 return -EACCES; 1174 return -EACCES;
1231 /* 1175 /*
@@ -1244,8 +1188,18 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1244static int 1188static int
1245__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1189__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1246{ 1190{
1247 struct net_device *dev = rx->dev; 1191 struct ieee80211_sub_if_data *sdata = rx->sdata;
1248 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1192 struct net_device *dev = sdata->dev;
1193 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1194
1195 if (ieee80211_has_a4(hdr->frame_control) &&
1196 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1197 return -1;
1198
1199 if (is_multicast_ether_addr(hdr->addr1) &&
1200 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1201 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1202 return -1;
1249 1203
1250 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); 1204 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1251} 1205}
@@ -1264,7 +1218,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1264 * of whether the frame was encrypted or not. 1218 * of whether the frame was encrypted or not.
1265 */ 1219 */
1266 if (ehdr->h_proto == htons(ETH_P_PAE) && 1220 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1267 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 || 1221 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 ||
1268 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1222 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1269 return true; 1223 return true;
1270 1224
@@ -1281,10 +1235,10 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1281static void 1235static void
1282ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 1236ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1283{ 1237{
1284 struct net_device *dev = rx->dev; 1238 struct ieee80211_sub_if_data *sdata = rx->sdata;
1239 struct net_device *dev = sdata->dev;
1285 struct ieee80211_local *local = rx->local; 1240 struct ieee80211_local *local = rx->local;
1286 struct sk_buff *skb, *xmit_skb; 1241 struct sk_buff *skb, *xmit_skb;
1287 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1288 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1242 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1289 struct sta_info *dsta; 1243 struct sta_info *dsta;
1290 1244
@@ -1294,7 +1248,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1294 if ((sdata->vif.type == NL80211_IFTYPE_AP || 1248 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1295 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1249 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1296 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 1250 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1297 (rx->flags & IEEE80211_RX_RA_MATCH)) { 1251 (rx->flags & IEEE80211_RX_RA_MATCH) &&
1252 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1298 if (is_multicast_ether_addr(ehdr->h_dest)) { 1253 if (is_multicast_ether_addr(ehdr->h_dest)) {
1299 /* 1254 /*
1300 * send multicast frames both to higher layers in 1255 * send multicast frames both to higher layers in
@@ -1337,10 +1292,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1337 skb = NULL; 1292 skb = NULL;
1338 } else { 1293 } else {
1339 u8 *data = skb->data; 1294 u8 *data = skb->data;
1340 size_t len = skb->len; 1295 size_t len = skb_headlen(skb);
1341 u8 *new = __skb_push(skb, align); 1296 skb->data -= align;
1342 memmove(new, data, len); 1297 memmove(skb->data, data, len);
1343 __skb_trim(skb, len); 1298 skb_set_tail_pointer(skb, len);
1344 } 1299 }
1345 } 1300 }
1346#endif 1301#endif
@@ -1365,7 +1320,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1365static ieee80211_rx_result debug_noinline 1320static ieee80211_rx_result debug_noinline
1366ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1321ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1367{ 1322{
1368 struct net_device *dev = rx->dev; 1323 struct net_device *dev = rx->sdata->dev;
1369 struct ieee80211_local *local = rx->local; 1324 struct ieee80211_local *local = rx->local;
1370 u16 ethertype; 1325 u16 ethertype;
1371 u8 *payload; 1326 u8 *payload;
@@ -1490,12 +1445,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1490 unsigned int hdrlen; 1445 unsigned int hdrlen;
1491 struct sk_buff *skb = rx->skb, *fwd_skb; 1446 struct sk_buff *skb = rx->skb, *fwd_skb;
1492 struct ieee80211_local *local = rx->local; 1447 struct ieee80211_local *local = rx->local;
1493 struct ieee80211_sub_if_data *sdata; 1448 struct ieee80211_sub_if_data *sdata = rx->sdata;
1494 1449
1495 hdr = (struct ieee80211_hdr *) skb->data; 1450 hdr = (struct ieee80211_hdr *) skb->data;
1496 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1451 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1497 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1452 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1498 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1499 1453
1500 if (!ieee80211_is_data(hdr->frame_control)) 1454 if (!ieee80211_is_data(hdr->frame_control))
1501 return RX_CONTINUE; 1455 return RX_CONTINUE;
@@ -1533,7 +1487,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1533 1487
1534 /* Frame has reached destination. Don't forward */ 1488 /* Frame has reached destination. Don't forward */
1535 if (!is_multicast_ether_addr(hdr->addr1) && 1489 if (!is_multicast_ether_addr(hdr->addr1) &&
1536 compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) 1490 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0)
1537 return RX_CONTINUE; 1491 return RX_CONTINUE;
1538 1492
1539 mesh_hdr->ttl--; 1493 mesh_hdr->ttl--;
@@ -1550,10 +1504,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1550 1504
1551 if (!fwd_skb && net_ratelimit()) 1505 if (!fwd_skb && net_ratelimit())
1552 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1506 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1553 rx->dev->name); 1507 sdata->dev->name);
1554 1508
1555 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1509 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1556 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); 1510 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN);
1557 info = IEEE80211_SKB_CB(fwd_skb); 1511 info = IEEE80211_SKB_CB(fwd_skb);
1558 memset(info, 0, sizeof(*info)); 1512 memset(info, 0, sizeof(*info));
1559 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1513 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1587,7 +1541,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1587 } 1541 }
1588 1542
1589 if (is_multicast_ether_addr(hdr->addr1) || 1543 if (is_multicast_ether_addr(hdr->addr1) ||
1590 rx->dev->flags & IFF_PROMISC) 1544 sdata->dev->flags & IFF_PROMISC)
1591 return RX_CONTINUE; 1545 return RX_CONTINUE;
1592 else 1546 else
1593 return RX_DROP_MONITOR; 1547 return RX_DROP_MONITOR;
@@ -1597,7 +1551,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1597static ieee80211_rx_result debug_noinline 1551static ieee80211_rx_result debug_noinline
1598ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1552ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1599{ 1553{
1600 struct net_device *dev = rx->dev; 1554 struct ieee80211_sub_if_data *sdata = rx->sdata;
1555 struct net_device *dev = sdata->dev;
1601 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1556 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1602 __le16 fc = hdr->frame_control; 1557 __le16 fc = hdr->frame_control;
1603 int err; 1558 int err;
@@ -1608,6 +1563,14 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1608 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1563 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1609 return RX_DROP_MONITOR; 1564 return RX_DROP_MONITOR;
1610 1565
1566 /*
1567 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1568 * that a 4-addr station can be detected and moved into a separate VLAN
1569 */
1570 if (ieee80211_has_a4(hdr->frame_control) &&
1571 sdata->vif.type == NL80211_IFTYPE_AP)
1572 return RX_DROP_MONITOR;
1573
1611 err = __ieee80211_data_to_8023(rx); 1574 err = __ieee80211_data_to_8023(rx);
1612 if (unlikely(err)) 1575 if (unlikely(err))
1613 return RX_DROP_UNUSABLE; 1576 return RX_DROP_UNUSABLE;
@@ -1641,11 +1604,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1641 1604
1642 if (ieee80211_is_back_req(bar->frame_control)) { 1605 if (ieee80211_is_back_req(bar->frame_control)) {
1643 if (!rx->sta) 1606 if (!rx->sta)
1644 return RX_CONTINUE; 1607 return RX_DROP_MONITOR;
1645 tid = le16_to_cpu(bar->control) >> 12; 1608 tid = le16_to_cpu(bar->control) >> 12;
1646 if (rx->sta->ampdu_mlme.tid_state_rx[tid] 1609 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1647 != HT_AGG_STATE_OPERATIONAL) 1610 != HT_AGG_STATE_OPERATIONAL)
1648 return RX_CONTINUE; 1611 return RX_DROP_MONITOR;
1649 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; 1612 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1650 1613
1651 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1614 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1655,13 +1618,10 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1655 mod_timer(&tid_agg_rx->session_timer, 1618 mod_timer(&tid_agg_rx->session_timer,
1656 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 1619 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1657 1620
1658 /* manage reordering buffer according to requested */ 1621 /* release stored frames up to start of BAR */
1659 /* sequence number */ 1622 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
1660 rcu_read_lock(); 1623 kfree_skb(skb);
1661 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, 1624 return RX_QUEUED;
1662 start_seq_num, 1);
1663 rcu_read_unlock();
1664 return RX_DROP_UNUSABLE;
1665 } 1625 }
1666 1626
1667 return RX_CONTINUE; 1627 return RX_CONTINUE;
@@ -1710,14 +1670,14 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1710 mgmt->u.action.u.sa_query.trans_id, 1670 mgmt->u.action.u.sa_query.trans_id,
1711 WLAN_SA_QUERY_TR_ID_LEN); 1671 WLAN_SA_QUERY_TR_ID_LEN);
1712 1672
1713 ieee80211_tx_skb(sdata, skb, 1); 1673 ieee80211_tx_skb(sdata, skb);
1714} 1674}
1715 1675
1716static ieee80211_rx_result debug_noinline 1676static ieee80211_rx_result debug_noinline
1717ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 1677ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1718{ 1678{
1719 struct ieee80211_local *local = rx->local; 1679 struct ieee80211_local *local = rx->local;
1720 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1680 struct ieee80211_sub_if_data *sdata = rx->sdata;
1721 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1681 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1722 int len = rx->skb->len; 1682 int len = rx->skb->len;
1723 1683
@@ -1829,7 +1789,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1829static ieee80211_rx_result debug_noinline 1789static ieee80211_rx_result debug_noinline
1830ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1790ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1831{ 1791{
1832 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1792 struct ieee80211_sub_if_data *sdata = rx->sdata;
1833 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1793 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1834 1794
1835 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1795 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
@@ -1867,11 +1827,11 @@ static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
1867 * Some hardware seem to generate incorrect Michael MIC 1827 * Some hardware seem to generate incorrect Michael MIC
1868 * reports; ignore them to avoid triggering countermeasures. 1828 * reports; ignore them to avoid triggering countermeasures.
1869 */ 1829 */
1870 goto ignore; 1830 return;
1871 } 1831 }
1872 1832
1873 if (!ieee80211_has_protected(hdr->frame_control)) 1833 if (!ieee80211_has_protected(hdr->frame_control))
1874 goto ignore; 1834 return;
1875 1835
1876 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { 1836 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1877 /* 1837 /*
@@ -1880,35 +1840,33 @@ static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
1880 * group keys and only the AP is sending real multicast 1840 * group keys and only the AP is sending real multicast
1881 * frames in the BSS. 1841 * frames in the BSS.
1882 */ 1842 */
1883 goto ignore; 1843 return;
1884 } 1844 }
1885 1845
1886 if (!ieee80211_is_data(hdr->frame_control) && 1846 if (!ieee80211_is_data(hdr->frame_control) &&
1887 !ieee80211_is_auth(hdr->frame_control)) 1847 !ieee80211_is_auth(hdr->frame_control))
1888 goto ignore; 1848 return;
1889 1849
1890 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, 1850 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
1891 GFP_ATOMIC); 1851 GFP_ATOMIC);
1892 ignore:
1893 dev_kfree_skb(rx->skb);
1894 rx->skb = NULL;
1895} 1852}
1896 1853
1897/* TODO: use IEEE80211_RX_FRAGMENTED */ 1854/* TODO: use IEEE80211_RX_FRAGMENTED */
1898static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx) 1855static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
1856 struct ieee80211_rate *rate)
1899{ 1857{
1900 struct ieee80211_sub_if_data *sdata; 1858 struct ieee80211_sub_if_data *sdata;
1901 struct ieee80211_local *local = rx->local; 1859 struct ieee80211_local *local = rx->local;
1902 struct ieee80211_rtap_hdr { 1860 struct ieee80211_rtap_hdr {
1903 struct ieee80211_radiotap_header hdr; 1861 struct ieee80211_radiotap_header hdr;
1904 u8 flags; 1862 u8 flags;
1905 u8 rate; 1863 u8 rate_or_pad;
1906 __le16 chan_freq; 1864 __le16 chan_freq;
1907 __le16 chan_flags; 1865 __le16 chan_flags;
1908 } __attribute__ ((packed)) *rthdr; 1866 } __attribute__ ((packed)) *rthdr;
1909 struct sk_buff *skb = rx->skb, *skb2; 1867 struct sk_buff *skb = rx->skb, *skb2;
1910 struct net_device *prev_dev = NULL; 1868 struct net_device *prev_dev = NULL;
1911 struct ieee80211_rx_status *status = rx->status; 1869 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1912 1870
1913 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED) 1871 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1914 goto out_free_skb; 1872 goto out_free_skb;
@@ -1922,10 +1880,13 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1922 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); 1880 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1923 rthdr->hdr.it_present = 1881 rthdr->hdr.it_present =
1924 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 1882 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1925 (1 << IEEE80211_RADIOTAP_RATE) |
1926 (1 << IEEE80211_RADIOTAP_CHANNEL)); 1883 (1 << IEEE80211_RADIOTAP_CHANNEL));
1927 1884
1928 rthdr->rate = rx->rate->bitrate / 5; 1885 if (rate) {
1886 rthdr->rate_or_pad = rate->bitrate / 5;
1887 rthdr->hdr.it_present |=
1888 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
1889 }
1929 rthdr->chan_freq = cpu_to_le16(status->freq); 1890 rthdr->chan_freq = cpu_to_le16(status->freq);
1930 1891
1931 if (status->band == IEEE80211_BAND_5GHZ) 1892 if (status->band == IEEE80211_BAND_5GHZ)
@@ -1978,13 +1939,13 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1978 1939
1979static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 1940static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1980 struct ieee80211_rx_data *rx, 1941 struct ieee80211_rx_data *rx,
1981 struct sk_buff *skb) 1942 struct sk_buff *skb,
1943 struct ieee80211_rate *rate)
1982{ 1944{
1983 ieee80211_rx_result res = RX_DROP_MONITOR; 1945 ieee80211_rx_result res = RX_DROP_MONITOR;
1984 1946
1985 rx->skb = skb; 1947 rx->skb = skb;
1986 rx->sdata = sdata; 1948 rx->sdata = sdata;
1987 rx->dev = sdata->dev;
1988 1949
1989#define CALL_RXH(rxh) \ 1950#define CALL_RXH(rxh) \
1990 do { \ 1951 do { \
@@ -2023,7 +1984,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2023 rx->sta->rx_dropped++; 1984 rx->sta->rx_dropped++;
2024 /* fall through */ 1985 /* fall through */
2025 case RX_CONTINUE: 1986 case RX_CONTINUE:
2026 ieee80211_rx_cooked_monitor(rx); 1987 ieee80211_rx_cooked_monitor(rx, rate);
2027 break; 1988 break;
2028 case RX_DROP_UNUSABLE: 1989 case RX_DROP_UNUSABLE:
2029 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 1990 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
@@ -2043,12 +2004,14 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2043 struct ieee80211_rx_data *rx, 2004 struct ieee80211_rx_data *rx,
2044 struct ieee80211_hdr *hdr) 2005 struct ieee80211_hdr *hdr)
2045{ 2006{
2046 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type); 2007 struct sk_buff *skb = rx->skb;
2008 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2009 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2047 int multicast = is_multicast_ether_addr(hdr->addr1); 2010 int multicast = is_multicast_ether_addr(hdr->addr1);
2048 2011
2049 switch (sdata->vif.type) { 2012 switch (sdata->vif.type) {
2050 case NL80211_IFTYPE_STATION: 2013 case NL80211_IFTYPE_STATION:
2051 if (!bssid) 2014 if (!bssid && !sdata->u.mgd.use_4addr)
2052 return 0; 2015 return 0;
2053 if (!multicast && 2016 if (!multicast &&
2054 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { 2017 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
@@ -2075,10 +2038,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2075 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2038 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2076 } else if (!rx->sta) { 2039 } else if (!rx->sta) {
2077 int rate_idx; 2040 int rate_idx;
2078 if (rx->status->flag & RX_FLAG_HT) 2041 if (status->flag & RX_FLAG_HT)
2079 rate_idx = 0; /* TODO: HT rates */ 2042 rate_idx = 0; /* TODO: HT rates */
2080 else 2043 else
2081 rate_idx = rx->status->rate_idx; 2044 rate_idx = status->rate_idx;
2082 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2, 2045 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2083 BIT(rate_idx)); 2046 BIT(rate_idx));
2084 } 2047 }
@@ -2113,8 +2076,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2113 return 0; 2076 return 0;
2114 break; 2077 break;
2115 case NL80211_IFTYPE_MONITOR: 2078 case NL80211_IFTYPE_MONITOR:
2116 /* take everything */
2117 break;
2118 case NL80211_IFTYPE_UNSPECIFIED: 2079 case NL80211_IFTYPE_UNSPECIFIED:
2119 case __NL80211_IFTYPE_AFTER_LAST: 2080 case __NL80211_IFTYPE_AFTER_LAST:
2120 /* should never get here */ 2081 /* should never get here */
@@ -2147,23 +2108,9 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2147 rx.skb = skb; 2108 rx.skb = skb;
2148 rx.local = local; 2109 rx.local = local;
2149 2110
2150 rx.status = status;
2151 rx.rate = rate;
2152
2153 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) 2111 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2154 local->dot11ReceivedFragmentCount++; 2112 local->dot11ReceivedFragmentCount++;
2155 2113
2156 rx.sta = sta_info_get(local, hdr->addr2);
2157 if (rx.sta) {
2158 rx.sdata = rx.sta->sdata;
2159 rx.dev = rx.sta->sdata->dev;
2160 }
2161
2162 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2163 ieee80211_rx_michael_mic_report(hdr, &rx);
2164 return;
2165 }
2166
2167 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2114 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2168 test_bit(SCAN_OFF_CHANNEL, &local->scanning))) 2115 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2169 rx.flags |= IEEE80211_RX_IN_SCAN; 2116 rx.flags |= IEEE80211_RX_IN_SCAN;
@@ -2171,13 +2118,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2171 ieee80211_parse_qos(&rx); 2118 ieee80211_parse_qos(&rx);
2172 ieee80211_verify_alignment(&rx); 2119 ieee80211_verify_alignment(&rx);
2173 2120
2174 skb = rx.skb; 2121 rx.sta = sta_info_get(local, hdr->addr2);
2122 if (rx.sta)
2123 rx.sdata = rx.sta->sdata;
2175 2124
2176 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { 2125 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
2177 rx.flags |= IEEE80211_RX_RA_MATCH; 2126 rx.flags |= IEEE80211_RX_RA_MATCH;
2178 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2127 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2179 if (prepares) 2128 if (prepares) {
2180 prev = rx.sdata; 2129 if (status->flag & RX_FLAG_MMIC_ERROR) {
2130 if (rx.flags & IEEE80211_RX_RA_MATCH)
2131 ieee80211_rx_michael_mic_report(hdr, &rx);
2132 } else
2133 prev = rx.sdata;
2134 }
2181 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2135 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2182 if (!netif_running(sdata->dev)) 2136 if (!netif_running(sdata->dev))
2183 continue; 2137 continue;
@@ -2192,6 +2146,13 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2192 if (!prepares) 2146 if (!prepares)
2193 continue; 2147 continue;
2194 2148
2149 if (status->flag & RX_FLAG_MMIC_ERROR) {
2150 rx.sdata = sdata;
2151 if (rx.flags & IEEE80211_RX_RA_MATCH)
2152 ieee80211_rx_michael_mic_report(hdr, &rx);
2153 continue;
2154 }
2155
2195 /* 2156 /*
2196 * frame is destined for this interface, but if it's not 2157 * frame is destined for this interface, but if it's not
2197 * also for the previous one we handle that after the 2158 * also for the previous one we handle that after the
@@ -2217,11 +2178,11 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2217 prev->dev->name); 2178 prev->dev->name);
2218 continue; 2179 continue;
2219 } 2180 }
2220 ieee80211_invoke_rx_handlers(prev, &rx, skb_new); 2181 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2221 prev = sdata; 2182 prev = sdata;
2222 } 2183 }
2223 if (prev) 2184 if (prev)
2224 ieee80211_invoke_rx_handlers(prev, &rx, skb); 2185 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
2225 else 2186 else
2226 dev_kfree_skb(skb); 2187 dev_kfree_skb(skb);
2227} 2188}
@@ -2250,7 +2211,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2250 int index) 2211 int index)
2251{ 2212{
2252 struct ieee80211_supported_band *sband; 2213 struct ieee80211_supported_band *sband;
2253 struct ieee80211_rate *rate; 2214 struct ieee80211_rate *rate = NULL;
2254 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 2215 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
2255 struct ieee80211_rx_status *status; 2216 struct ieee80211_rx_status *status;
2256 2217
@@ -2261,9 +2222,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2261 2222
2262 /* release the reordered frames to stack */ 2223 /* release the reordered frames to stack */
2263 sband = hw->wiphy->bands[status->band]; 2224 sband = hw->wiphy->bands[status->band];
2264 if (status->flag & RX_FLAG_HT) 2225 if (!(status->flag & RX_FLAG_HT))
2265 rate = sband->bitrates; /* TODO: HT rates */
2266 else
2267 rate = &sband->bitrates[status->rate_idx]; 2226 rate = &sband->bitrates[status->rate_idx];
2268 __ieee80211_rx_handle_packet(hw, skb, rate); 2227 __ieee80211_rx_handle_packet(hw, skb, rate);
2269 tid_agg_rx->stored_mpdu_num--; 2228 tid_agg_rx->stored_mpdu_num--;
@@ -2273,6 +2232,18 @@ no_frame:
2273 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2232 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2274} 2233}
2275 2234
2235static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
2236 struct tid_ampdu_rx *tid_agg_rx,
2237 u16 head_seq_num)
2238{
2239 int index;
2240
2241 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2242 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
2243 tid_agg_rx->buf_size;
2244 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2245 }
2246}
2276 2247
2277/* 2248/*
2278 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 2249 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
@@ -2284,15 +2255,17 @@ no_frame:
2284#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 2255#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2285 2256
2286/* 2257/*
2287 * As it function blongs to Rx path it must be called with 2258 * As this function belongs to the RX path it must be under
2288 * the proper rcu_read_lock protection for its flow. 2259 * rcu_read_lock protection. It returns false if the frame
2260 * can be processed immediately, true if it was consumed.
2289 */ 2261 */
2290static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 2262static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2291 struct tid_ampdu_rx *tid_agg_rx, 2263 struct tid_ampdu_rx *tid_agg_rx,
2292 struct sk_buff *skb, 2264 struct sk_buff *skb)
2293 u16 mpdu_seq_num,
2294 int bar_req)
2295{ 2265{
2266 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2267 u16 sc = le16_to_cpu(hdr->seq_ctrl);
2268 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2296 u16 head_seq_num, buf_size; 2269 u16 head_seq_num, buf_size;
2297 int index; 2270 int index;
2298 2271
@@ -2302,47 +2275,37 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2302 /* frame with out of date sequence number */ 2275 /* frame with out of date sequence number */
2303 if (seq_less(mpdu_seq_num, head_seq_num)) { 2276 if (seq_less(mpdu_seq_num, head_seq_num)) {
2304 dev_kfree_skb(skb); 2277 dev_kfree_skb(skb);
2305 return 1; 2278 return true;
2306 } 2279 }
2307 2280
2308 /* if frame sequence number exceeds our buffering window size or 2281 /*
2309 * block Ack Request arrived - release stored frames */ 2282 * If frame the sequence number exceeds our buffering window
2310 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) { 2283 * size release some previous frames to make room for this one.
2311 /* new head to the ordering buffer */ 2284 */
2312 if (bar_req) 2285 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
2313 head_seq_num = mpdu_seq_num; 2286 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
2314 else
2315 head_seq_num =
2316 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2317 /* release stored frames up to new head to stack */ 2287 /* release stored frames up to new head to stack */
2318 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 2288 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
2319 index = seq_sub(tid_agg_rx->head_seq_num,
2320 tid_agg_rx->ssn)
2321 % tid_agg_rx->buf_size;
2322 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2323 index);
2324 }
2325 if (bar_req)
2326 return 1;
2327 } 2289 }
2328 2290
2329 /* now the new frame is always in the range of the reordering */ 2291 /* Now the new frame is always in the range of the reordering buffer */
2330 /* buffer window */ 2292
2331 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) 2293 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2332 % tid_agg_rx->buf_size; 2294
2333 /* check if we already stored this frame */ 2295 /* check if we already stored this frame */
2334 if (tid_agg_rx->reorder_buf[index]) { 2296 if (tid_agg_rx->reorder_buf[index]) {
2335 dev_kfree_skb(skb); 2297 dev_kfree_skb(skb);
2336 return 1; 2298 return true;
2337 } 2299 }
2338 2300
2339 /* if arrived mpdu is in the right order and nothing else stored */ 2301 /*
2340 /* release it immediately */ 2302 * If the current MPDU is in the right order and nothing else
2303 * is stored we can process it directly, no need to buffer it.
2304 */
2341 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 2305 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2342 tid_agg_rx->stored_mpdu_num == 0) { 2306 tid_agg_rx->stored_mpdu_num == 0) {
2343 tid_agg_rx->head_seq_num = 2307 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2344 seq_inc(tid_agg_rx->head_seq_num); 2308 return false;
2345 return 0;
2346 } 2309 }
2347 2310
2348 /* put the frame in the reordering buffer */ 2311 /* put the frame in the reordering buffer */
@@ -2350,8 +2313,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2350 tid_agg_rx->reorder_time[index] = jiffies; 2313 tid_agg_rx->reorder_time[index] = jiffies;
2351 tid_agg_rx->stored_mpdu_num++; 2314 tid_agg_rx->stored_mpdu_num++;
2352 /* release the buffer until next missing frame */ 2315 /* release the buffer until next missing frame */
2353 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) 2316 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
2354 % tid_agg_rx->buf_size; 2317 tid_agg_rx->buf_size;
2355 if (!tid_agg_rx->reorder_buf[index] && 2318 if (!tid_agg_rx->reorder_buf[index] &&
2356 tid_agg_rx->stored_mpdu_num > 1) { 2319 tid_agg_rx->stored_mpdu_num > 1) {
2357 /* 2320 /*
@@ -2362,12 +2325,12 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2362 int skipped = 1; 2325 int skipped = 1;
2363 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 2326 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2364 j = (j + 1) % tid_agg_rx->buf_size) { 2327 j = (j + 1) % tid_agg_rx->buf_size) {
2365 if (tid_agg_rx->reorder_buf[j] == NULL) { 2328 if (!tid_agg_rx->reorder_buf[j]) {
2366 skipped++; 2329 skipped++;
2367 continue; 2330 continue;
2368 } 2331 }
2369 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + 2332 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2370 HZ / 10)) 2333 HT_RX_REORDER_BUF_TIMEOUT))
2371 break; 2334 break;
2372 2335
2373#ifdef CONFIG_MAC80211_HT_DEBUG 2336#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -2383,51 +2346,56 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2383 * Increment the head seq# also for the skipped slots. 2346 * Increment the head seq# also for the skipped slots.
2384 */ 2347 */
2385 tid_agg_rx->head_seq_num = 2348 tid_agg_rx->head_seq_num =
2386 (tid_agg_rx->head_seq_num + skipped) & 2349 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
2387 SEQ_MASK;
2388 skipped = 0; 2350 skipped = 0;
2389 } 2351 }
2390 } else while (tid_agg_rx->reorder_buf[index]) { 2352 } else while (tid_agg_rx->reorder_buf[index]) {
2391 ieee80211_release_reorder_frame(hw, tid_agg_rx, index); 2353 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2392 index = seq_sub(tid_agg_rx->head_seq_num, 2354 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
2393 tid_agg_rx->ssn) % tid_agg_rx->buf_size; 2355 tid_agg_rx->buf_size;
2394 } 2356 }
2395 return 1; 2357
2358 return true;
2396} 2359}
2397 2360
2398static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, 2361/*
2399 struct sk_buff *skb) 2362 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
2363 * true if the MPDU was buffered, false if it should be processed.
2364 */
2365static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2366 struct sk_buff *skb)
2400{ 2367{
2401 struct ieee80211_hw *hw = &local->hw; 2368 struct ieee80211_hw *hw = &local->hw;
2402 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2369 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2403 struct sta_info *sta; 2370 struct sta_info *sta;
2404 struct tid_ampdu_rx *tid_agg_rx; 2371 struct tid_ampdu_rx *tid_agg_rx;
2405 u16 sc; 2372 u16 sc;
2406 u16 mpdu_seq_num;
2407 u8 ret = 0;
2408 int tid; 2373 int tid;
2409 2374
2375 if (!ieee80211_is_data_qos(hdr->frame_control))
2376 return false;
2377
2378 /*
2379 * filter the QoS data rx stream according to
2380 * STA/TID and check if this STA/TID is on aggregation
2381 */
2382
2410 sta = sta_info_get(local, hdr->addr2); 2383 sta = sta_info_get(local, hdr->addr2);
2411 if (!sta) 2384 if (!sta)
2412 return ret; 2385 return false;
2413
2414 /* filter the QoS data rx stream according to
2415 * STA/TID and check if this STA/TID is on aggregation */
2416 if (!ieee80211_is_data_qos(hdr->frame_control))
2417 goto end_reorder;
2418 2386
2419 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 2387 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2420 2388
2421 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) 2389 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2422 goto end_reorder; 2390 return false;
2423 2391
2424 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; 2392 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2425 2393
2426 /* qos null data frames are excluded */ 2394 /* qos null data frames are excluded */
2427 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 2395 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2428 goto end_reorder; 2396 return false;
2429 2397
2430 /* new un-ordered ampdu frame - process it */ 2398 /* new, potentially un-ordered, ampdu frame - process it */
2431 2399
2432 /* reset session timer */ 2400 /* reset session timer */
2433 if (tid_agg_rx->timeout) 2401 if (tid_agg_rx->timeout)
@@ -2439,16 +2407,11 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2439 if (sc & IEEE80211_SCTL_FRAG) { 2407 if (sc & IEEE80211_SCTL_FRAG) {
2440 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, 2408 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2441 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 2409 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2442 ret = 1; 2410 dev_kfree_skb(skb);
2443 goto end_reorder; 2411 return true;
2444 } 2412 }
2445 2413
2446 /* according to mpdu sequence number deal with reordering buffer */ 2414 return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
2447 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2448 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2449 mpdu_seq_num, 0);
2450 end_reorder:
2451 return ret;
2452} 2415}
2453 2416
2454/* 2417/*
@@ -2490,14 +2453,22 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2490 goto drop; 2453 goto drop;
2491 2454
2492 if (status->flag & RX_FLAG_HT) { 2455 if (status->flag & RX_FLAG_HT) {
2493 /* rate_idx is MCS index */ 2456 /*
2494 if (WARN_ON(status->rate_idx < 0 || 2457 * rate_idx is MCS index, which can be [0-76] as documented on:
2495 status->rate_idx >= 76)) 2458 *
2459 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2460 *
2461 * Anything else would be some sort of driver or hardware error.
2462 * The driver should catch hardware errors.
2463 */
2464 if (WARN((status->rate_idx < 0 ||
2465 status->rate_idx > 76),
2466 "Rate marked as an HT rate but passed "
2467 "status->rate_idx is not "
2468 "an MCS index [0-76]: %d (0x%02x)\n",
2469 status->rate_idx,
2470 status->rate_idx))
2496 goto drop; 2471 goto drop;
2497 /* HT rates are not in the table - use the highest legacy rate
2498 * for now since other parts of mac80211 may not yet be fully
2499 * MCS aware. */
2500 rate = &sband->bitrates[sband->n_bitrates - 1];
2501 } else { 2472 } else {
2502 if (WARN_ON(status->rate_idx < 0 || 2473 if (WARN_ON(status->rate_idx < 0 ||
2503 status->rate_idx >= sband->n_bitrates)) 2474 status->rate_idx >= sband->n_bitrates))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 7a350d2690a0..4cf387c944bf 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,8 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15/* TODO: figure out how to avoid that the "current BSS" expires */
16
17#include <linux/wireless.h> 15#include <linux/wireless.h>
18#include <linux/if_arp.h> 16#include <linux/if_arp.h>
19#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
@@ -189,6 +187,39 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
189 return RX_QUEUED; 187 return RX_QUEUED;
190} 188}
191 189
190/* return false if no more work */
191static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
192{
193 struct cfg80211_scan_request *req = local->scan_req;
194 enum ieee80211_band band;
195 int i, ielen, n_chans;
196
197 do {
198 if (local->hw_scan_band == IEEE80211_NUM_BANDS)
199 return false;
200
201 band = local->hw_scan_band;
202 n_chans = 0;
203 for (i = 0; i < req->n_channels; i++) {
204 if (req->channels[i]->band == band) {
205 local->hw_scan_req->channels[n_chans] =
206 req->channels[i];
207 n_chans++;
208 }
209 }
210
211 local->hw_scan_band++;
212 } while (!n_chans);
213
214 local->hw_scan_req->n_channels = n_chans;
215
216 ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
217 req->ie, req->ie_len, band);
218 local->hw_scan_req->ie_len = ielen;
219
220 return true;
221}
222
192/* 223/*
193 * inform AP that we will go to sleep so that it will buffer the frames 224 * inform AP that we will go to sleep so that it will buffer the frames
194 * while we scan 225 * while we scan
@@ -249,13 +280,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
249 } 280 }
250} 281}
251 282
252static void ieee80211_restore_scan_ies(struct ieee80211_local *local)
253{
254 kfree(local->scan_req->ie);
255 local->scan_req->ie = local->orig_ies;
256 local->scan_req->ie_len = local->orig_ies_len;
257}
258
259void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 283void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
260{ 284{
261 struct ieee80211_local *local = hw_to_local(hw); 285 struct ieee80211_local *local = hw_to_local(hw);
@@ -264,25 +288,36 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
264 288
265 mutex_lock(&local->scan_mtx); 289 mutex_lock(&local->scan_mtx);
266 290
267 if (WARN_ON(!local->scanning)) { 291 /*
292 * It's ok to abort a not-yet-running scan (that
293 * we have one at all will be verified by checking
294 * local->scan_req next), but not to complete it
295 * successfully.
296 */
297 if (WARN_ON(!local->scanning && !aborted))
298 aborted = true;
299
300 if (WARN_ON(!local->scan_req)) {
268 mutex_unlock(&local->scan_mtx); 301 mutex_unlock(&local->scan_mtx);
269 return; 302 return;
270 } 303 }
271 304
272 if (WARN_ON(!local->scan_req)) { 305 was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
306 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
307 ieee80211_queue_delayed_work(&local->hw,
308 &local->scan_work, 0);
273 mutex_unlock(&local->scan_mtx); 309 mutex_unlock(&local->scan_mtx);
274 return; 310 return;
275 } 311 }
276 312
277 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) 313 kfree(local->hw_scan_req);
278 ieee80211_restore_scan_ies(local); 314 local->hw_scan_req = NULL;
279 315
280 if (local->scan_req != local->int_scan_req) 316 if (local->scan_req != local->int_scan_req)
281 cfg80211_scan_done(local->scan_req, aborted); 317 cfg80211_scan_done(local->scan_req, aborted);
282 local->scan_req = NULL; 318 local->scan_req = NULL;
283 local->scan_sdata = NULL; 319 local->scan_sdata = NULL;
284 320
285 was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
286 local->scanning = 0; 321 local->scanning = 0;
287 local->scan_channel = NULL; 322 local->scan_channel = NULL;
288 323
@@ -394,19 +429,23 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
394 429
395 if (local->ops->hw_scan) { 430 if (local->ops->hw_scan) {
396 u8 *ies; 431 u8 *ies;
397 int ielen;
398 432
399 ies = kmalloc(2 + IEEE80211_MAX_SSID_LEN + 433 local->hw_scan_req = kmalloc(
400 local->scan_ies_len + req->ie_len, GFP_KERNEL); 434 sizeof(*local->hw_scan_req) +
401 if (!ies) 435 req->n_channels * sizeof(req->channels[0]) +
436 2 + IEEE80211_MAX_SSID_LEN + local->scan_ies_len +
437 req->ie_len, GFP_KERNEL);
438 if (!local->hw_scan_req)
402 return -ENOMEM; 439 return -ENOMEM;
403 440
404 ielen = ieee80211_build_preq_ies(local, ies, 441 local->hw_scan_req->ssids = req->ssids;
405 req->ie, req->ie_len); 442 local->hw_scan_req->n_ssids = req->n_ssids;
406 local->orig_ies = req->ie; 443 ies = (u8 *)local->hw_scan_req +
407 local->orig_ies_len = req->ie_len; 444 sizeof(*local->hw_scan_req) +
408 req->ie = ies; 445 req->n_channels * sizeof(req->channels[0]);
409 req->ie_len = ielen; 446 local->hw_scan_req->ie = ies;
447
448 local->hw_scan_band = 0;
410 } 449 }
411 450
412 local->scan_req = req; 451 local->scan_req = req;
@@ -438,16 +477,17 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
438 ieee80211_recalc_idle(local); 477 ieee80211_recalc_idle(local);
439 mutex_unlock(&local->scan_mtx); 478 mutex_unlock(&local->scan_mtx);
440 479
441 if (local->ops->hw_scan) 480 if (local->ops->hw_scan) {
442 rc = drv_hw_scan(local, local->scan_req); 481 WARN_ON(!ieee80211_prep_hw_scan(local));
443 else 482 rc = drv_hw_scan(local, local->hw_scan_req);
483 } else
444 rc = ieee80211_start_sw_scan(local); 484 rc = ieee80211_start_sw_scan(local);
445 485
446 mutex_lock(&local->scan_mtx); 486 mutex_lock(&local->scan_mtx);
447 487
448 if (rc) { 488 if (rc) {
449 if (local->ops->hw_scan) 489 kfree(local->hw_scan_req);
450 ieee80211_restore_scan_ies(local); 490 local->hw_scan_req = NULL;
451 local->scanning = 0; 491 local->scanning = 0;
452 492
453 ieee80211_recalc_idle(local); 493 ieee80211_recalc_idle(local);
@@ -574,23 +614,14 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
574{ 614{
575 int skip; 615 int skip;
576 struct ieee80211_channel *chan; 616 struct ieee80211_channel *chan;
577 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
578 617
579 skip = 0; 618 skip = 0;
580 chan = local->scan_req->channels[local->scan_channel_idx]; 619 chan = local->scan_req->channels[local->scan_channel_idx];
581 620
582 if (chan->flags & IEEE80211_CHAN_DISABLED || 621 local->scan_channel = chan;
583 (sdata->vif.type == NL80211_IFTYPE_ADHOC && 622 if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
584 chan->flags & IEEE80211_CHAN_NO_IBSS))
585 skip = 1; 623 skip = 1;
586 624
587 if (!skip) {
588 local->scan_channel = chan;
589 if (ieee80211_hw_config(local,
590 IEEE80211_CONF_CHANGE_CHANNEL))
591 skip = 1;
592 }
593
594 /* advance state machine to next channel/band */ 625 /* advance state machine to next channel/band */
595 local->scan_channel_idx++; 626 local->scan_channel_idx++;
596 627
@@ -656,6 +687,14 @@ void ieee80211_scan_work(struct work_struct *work)
656 return; 687 return;
657 } 688 }
658 689
690 if (local->hw_scan_req) {
691 int rc = drv_hw_scan(local, local->hw_scan_req);
692 mutex_unlock(&local->scan_mtx);
693 if (rc)
694 ieee80211_scan_completed(&local->hw, true);
695 return;
696 }
697
659 if (local->scan_req && !local->scanning) { 698 if (local->scan_req && !local->scanning) {
660 struct cfg80211_scan_request *req = local->scan_req; 699 struct cfg80211_scan_request *req = local->scan_req;
661 int rc; 700 int rc;
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 68953033403d..aa743a895cf9 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -65,7 +65,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
65 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; 65 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
66 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; 66 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
67 67
68 ieee80211_tx_skb(sdata, skb, 1); 68 ieee80211_tx_skb(sdata, skb);
69} 69}
70 70
71void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 71void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 594f2318c3d8..71f370dd24bc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -116,14 +116,15 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
116 return sta; 116 return sta;
117} 117}
118 118
119struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, 119struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
120 struct net_device *dev) 120 int idx)
121{ 121{
122 struct ieee80211_local *local = sdata->local;
122 struct sta_info *sta; 123 struct sta_info *sta;
123 int i = 0; 124 int i = 0;
124 125
125 list_for_each_entry_rcu(sta, &local->sta_list, list) { 126 list_for_each_entry_rcu(sta, &local->sta_list, list) {
126 if (dev && dev != sta->sdata->dev) 127 if (sdata != sta->sdata)
127 continue; 128 continue;
128 if (i < idx) { 129 if (i < idx) {
129 ++i; 130 ++i;
@@ -147,8 +148,10 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
147static void __sta_info_free(struct ieee80211_local *local, 148static void __sta_info_free(struct ieee80211_local *local,
148 struct sta_info *sta) 149 struct sta_info *sta)
149{ 150{
150 rate_control_free_sta(sta); 151 if (sta->rate_ctrl) {
151 rate_control_put(sta->rate_ctrl); 152 rate_control_free_sta(sta);
153 rate_control_put(sta->rate_ctrl);
154 }
152 155
153#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 156#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
154 printk(KERN_DEBUG "%s: Destroyed STA %pM\n", 157 printk(KERN_DEBUG "%s: Destroyed STA %pM\n",
@@ -171,6 +174,8 @@ void sta_info_destroy(struct sta_info *sta)
171 174
172 local = sta->local; 175 local = sta->local;
173 176
177 cancel_work_sync(&sta->drv_unblock_wk);
178
174 rate_control_remove_sta_debugfs(sta); 179 rate_control_remove_sta_debugfs(sta);
175 ieee80211_sta_debugfs_remove(sta); 180 ieee80211_sta_debugfs_remove(sta);
176 181
@@ -259,6 +264,38 @@ static void sta_info_hash_add(struct ieee80211_local *local,
259 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); 264 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
260} 265}
261 266
267static void sta_unblock(struct work_struct *wk)
268{
269 struct sta_info *sta;
270
271 sta = container_of(wk, struct sta_info, drv_unblock_wk);
272
273 if (sta->dead)
274 return;
275
276 if (!test_sta_flags(sta, WLAN_STA_PS_STA))
277 ieee80211_sta_ps_deliver_wakeup(sta);
278 else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL))
279 ieee80211_sta_ps_deliver_poll_response(sta);
280}
281
282static int sta_prepare_rate_control(struct ieee80211_local *local,
283 struct sta_info *sta, gfp_t gfp)
284{
285 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
286 return 0;
287
288 sta->rate_ctrl = rate_control_get(local->rate_ctrl);
289 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
290 &sta->sta, gfp);
291 if (!sta->rate_ctrl_priv) {
292 rate_control_put(sta->rate_ctrl);
293 return -ENOMEM;
294 }
295
296 return 0;
297}
298
262struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 299struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
263 u8 *addr, gfp_t gfp) 300 u8 *addr, gfp_t gfp)
264{ 301{
@@ -272,16 +309,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
272 309
273 spin_lock_init(&sta->lock); 310 spin_lock_init(&sta->lock);
274 spin_lock_init(&sta->flaglock); 311 spin_lock_init(&sta->flaglock);
312 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
275 313
276 memcpy(sta->sta.addr, addr, ETH_ALEN); 314 memcpy(sta->sta.addr, addr, ETH_ALEN);
277 sta->local = local; 315 sta->local = local;
278 sta->sdata = sdata; 316 sta->sdata = sdata;
279 317
280 sta->rate_ctrl = rate_control_get(local->rate_ctrl); 318 if (sta_prepare_rate_control(local, sta, gfp)) {
281 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
282 &sta->sta, gfp);
283 if (!sta->rate_ctrl_priv) {
284 rate_control_put(sta->rate_ctrl);
285 kfree(sta); 319 kfree(sta);
286 return NULL; 320 return NULL;
287 } 321 }
@@ -478,8 +512,10 @@ static void __sta_info_unlink(struct sta_info **sta)
478 } 512 }
479 513
480 list_del(&(*sta)->list); 514 list_del(&(*sta)->list);
515 (*sta)->dead = true;
481 516
482 if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) { 517 if (test_and_clear_sta_flags(*sta,
518 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
483 BUG_ON(!sdata->bss); 519 BUG_ON(!sdata->bss);
484 520
485 atomic_dec(&sdata->bss->num_sta_ps); 521 atomic_dec(&sdata->bss->num_sta_ps);
@@ -489,6 +525,9 @@ static void __sta_info_unlink(struct sta_info **sta)
489 local->num_sta--; 525 local->num_sta--;
490 local->sta_generation++; 526 local->sta_generation++;
491 527
528 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
529 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
530
492 if (local->ops->sta_notify) { 531 if (local->ops->sta_notify) {
493 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 532 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
494 sdata = container_of(sdata->bss, 533 sdata = container_of(sdata->bss,
@@ -801,8 +840,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
801 sta_info_destroy(sta); 840 sta_info_destroy(sta);
802} 841}
803 842
804struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, 843struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
805 const u8 *addr) 844 const u8 *addr)
806{ 845{
807 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); 846 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
808 847
@@ -810,4 +849,114 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
810 return NULL; 849 return NULL;
811 return &sta->sta; 850 return &sta->sta;
812} 851}
852EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
853
854struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
855 const u8 *addr)
856{
857 struct ieee80211_sub_if_data *sdata;
858
859 if (!vif)
860 return NULL;
861
862 sdata = vif_to_sdata(vif);
863
864 return ieee80211_find_sta_by_hw(&sdata->local->hw, addr);
865}
813EXPORT_SYMBOL(ieee80211_find_sta); 866EXPORT_SYMBOL(ieee80211_find_sta);
867
868/* powersave support code */
869void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
870{
871 struct ieee80211_sub_if_data *sdata = sta->sdata;
872 struct ieee80211_local *local = sdata->local;
873 int sent, buffered;
874
875 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
876
877 if (!skb_queue_empty(&sta->ps_tx_buf))
878 sta_info_clear_tim_bit(sta);
879
880 /* Send all buffered frames to the station */
881 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
882 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
883 sent += buffered;
884 local->total_ps_buffered -= buffered;
885
886#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
887 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
888 "since STA not sleeping anymore\n", sdata->dev->name,
889 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
890#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
891}
892
893void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
894{
895 struct ieee80211_sub_if_data *sdata = sta->sdata;
896 struct ieee80211_local *local = sdata->local;
897 struct sk_buff *skb;
898 int no_pending_pkts;
899
900 skb = skb_dequeue(&sta->tx_filtered);
901 if (!skb) {
902 skb = skb_dequeue(&sta->ps_tx_buf);
903 if (skb)
904 local->total_ps_buffered--;
905 }
906 no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
907 skb_queue_empty(&sta->ps_tx_buf);
908
909 if (skb) {
910 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
911 struct ieee80211_hdr *hdr =
912 (struct ieee80211_hdr *) skb->data;
913
914 /*
915 * Tell TX path to send this frame even though the STA may
916 * still remain is PS mode after this frame exchange.
917 */
918 info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
919
920#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
921 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
922 sta->sta.addr, sta->sta.aid,
923 skb_queue_len(&sta->ps_tx_buf));
924#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
925
926 /* Use MoreData flag to indicate whether there are more
927 * buffered frames for this STA */
928 if (no_pending_pkts)
929 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
930 else
931 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
932
933 ieee80211_add_pending_skb(local, skb);
934
935 if (no_pending_pkts)
936 sta_info_clear_tim_bit(sta);
937#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
938 } else {
939 /*
940 * FIXME: This can be the result of a race condition between
941 * us expiring a frame and the station polling for it.
942 * Should we send it a null-func frame indicating we
943 * have nothing buffered for it?
944 */
945 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
946 "though there are no buffered frames for it\n",
947 sdata->dev->name, sta->sta.addr);
948#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
949 }
950}
951
952void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
953 struct ieee80211_sta *pubsta, bool block)
954{
955 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
956
957 if (block)
958 set_sta_flags(sta, WLAN_STA_PS_DRIVER);
959 else
960 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
961}
962EXPORT_SYMBOL(ieee80211_sta_block_awake);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ccc3adf962c7..b4810f6aa94f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -12,6 +12,7 @@
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/if_ether.h> 14#include <linux/if_ether.h>
15#include <linux/workqueue.h>
15#include "key.h" 16#include "key.h"
16 17
17/** 18/**
@@ -21,7 +22,7 @@
21 * 22 *
22 * @WLAN_STA_AUTH: Station is authenticated. 23 * @WLAN_STA_AUTH: Station is authenticated.
23 * @WLAN_STA_ASSOC: Station is associated. 24 * @WLAN_STA_ASSOC: Station is associated.
24 * @WLAN_STA_PS: Station is in power-save mode 25 * @WLAN_STA_PS_STA: Station is in power-save mode
25 * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. 26 * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic.
26 * This bit is always checked so needs to be enabled for all stations 27 * This bit is always checked so needs to be enabled for all stations
27 * when virtual port control is not in use. 28 * when virtual port control is not in use.
@@ -36,11 +37,16 @@
36 * @WLAN_STA_MFP: Management frame protection is used with this STA. 37 * @WLAN_STA_MFP: Management frame protection is used with this STA.
37 * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. 38 * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
38 * Used to deny ADDBA requests (both TX and RX). 39 * Used to deny ADDBA requests (both TX and RX).
40 * @WLAN_STA_PS_DRIVER: driver requires keeping this station in
41 * power-save mode logically to flush frames that might still
42 * be in the queues
43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
44 * station in power-save mode, reply when the driver unblocks.
39 */ 45 */
40enum ieee80211_sta_info_flags { 46enum ieee80211_sta_info_flags {
41 WLAN_STA_AUTH = 1<<0, 47 WLAN_STA_AUTH = 1<<0,
42 WLAN_STA_ASSOC = 1<<1, 48 WLAN_STA_ASSOC = 1<<1,
43 WLAN_STA_PS = 1<<2, 49 WLAN_STA_PS_STA = 1<<2,
44 WLAN_STA_AUTHORIZED = 1<<3, 50 WLAN_STA_AUTHORIZED = 1<<3,
45 WLAN_STA_SHORT_PREAMBLE = 1<<4, 51 WLAN_STA_SHORT_PREAMBLE = 1<<4,
46 WLAN_STA_ASSOC_AP = 1<<5, 52 WLAN_STA_ASSOC_AP = 1<<5,
@@ -48,7 +54,9 @@ enum ieee80211_sta_info_flags {
48 WLAN_STA_WDS = 1<<7, 54 WLAN_STA_WDS = 1<<7,
49 WLAN_STA_CLEAR_PS_FILT = 1<<9, 55 WLAN_STA_CLEAR_PS_FILT = 1<<9,
50 WLAN_STA_MFP = 1<<10, 56 WLAN_STA_MFP = 1<<10,
51 WLAN_STA_SUSPEND = 1<<11 57 WLAN_STA_SUSPEND = 1<<11,
58 WLAN_STA_PS_DRIVER = 1<<12,
59 WLAN_STA_PSPOLL = 1<<13,
52}; 60};
53 61
54#define STA_TID_NUM 16 62#define STA_TID_NUM 16
@@ -177,6 +185,7 @@ struct sta_ampdu_mlme {
177 * @lock: used for locking all fields that require locking, see comments 185 * @lock: used for locking all fields that require locking, see comments
178 * in the header file. 186 * in the header file.
179 * @flaglock: spinlock for flags accesses 187 * @flaglock: spinlock for flags accesses
188 * @drv_unblock_wk: used for driver PS unblocking
180 * @listen_interval: listen interval of this station, when we're acting as AP 189 * @listen_interval: listen interval of this station, when we're acting as AP
181 * @pin_status: used internally for pinning a STA struct into memory 190 * @pin_status: used internally for pinning a STA struct into memory
182 * @flags: STA flags, see &enum ieee80211_sta_info_flags 191 * @flags: STA flags, see &enum ieee80211_sta_info_flags
@@ -193,7 +202,6 @@ struct sta_ampdu_mlme {
193 * @rx_fragments: number of received MPDUs 202 * @rx_fragments: number of received MPDUs
194 * @rx_dropped: number of dropped MPDUs from this STA 203 * @rx_dropped: number of dropped MPDUs from this STA
195 * @last_signal: signal of last received frame from this STA 204 * @last_signal: signal of last received frame from this STA
196 * @last_qual: qual of last received frame from this STA
197 * @last_noise: noise of last received frame from this STA 205 * @last_noise: noise of last received frame from this STA
198 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) 206 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
199 * @tx_filtered_count: number of frames the hardware filtered for this STA 207 * @tx_filtered_count: number of frames the hardware filtered for this STA
@@ -217,6 +225,7 @@ struct sta_ampdu_mlme {
217 * @plink_timer_was_running: used by suspend/resume to restore timers 225 * @plink_timer_was_running: used by suspend/resume to restore timers
218 * @debugfs: debug filesystem info 226 * @debugfs: debug filesystem info
219 * @sta: station information we share with the driver 227 * @sta: station information we share with the driver
228 * @dead: set to true when sta is unlinked
220 */ 229 */
221struct sta_info { 230struct sta_info {
222 /* General information, mostly static */ 231 /* General information, mostly static */
@@ -230,8 +239,12 @@ struct sta_info {
230 spinlock_t lock; 239 spinlock_t lock;
231 spinlock_t flaglock; 240 spinlock_t flaglock;
232 241
242 struct work_struct drv_unblock_wk;
243
233 u16 listen_interval; 244 u16 listen_interval;
234 245
246 bool dead;
247
235 /* 248 /*
236 * for use by the internal lifetime management, 249 * for use by the internal lifetime management,
237 * see __sta_info_unlink 250 * see __sta_info_unlink
@@ -259,7 +272,6 @@ struct sta_info {
259 unsigned long rx_fragments; 272 unsigned long rx_fragments;
260 unsigned long rx_dropped; 273 unsigned long rx_dropped;
261 int last_signal; 274 int last_signal;
262 int last_qual;
263 int last_noise; 275 int last_noise;
264 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 276 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
265 277
@@ -301,28 +313,6 @@ struct sta_info {
301#ifdef CONFIG_MAC80211_DEBUGFS 313#ifdef CONFIG_MAC80211_DEBUGFS
302 struct sta_info_debugfsdentries { 314 struct sta_info_debugfsdentries {
303 struct dentry *dir; 315 struct dentry *dir;
304 struct dentry *flags;
305 struct dentry *num_ps_buf_frames;
306 struct dentry *inactive_ms;
307 struct dentry *last_seq_ctrl;
308 struct dentry *agg_status;
309 struct dentry *aid;
310 struct dentry *dev;
311 struct dentry *rx_packets;
312 struct dentry *tx_packets;
313 struct dentry *rx_bytes;
314 struct dentry *tx_bytes;
315 struct dentry *rx_duplicates;
316 struct dentry *rx_fragments;
317 struct dentry *rx_dropped;
318 struct dentry *tx_fragments;
319 struct dentry *tx_filtered;
320 struct dentry *tx_retry_failed;
321 struct dentry *tx_retry_count;
322 struct dentry *last_signal;
323 struct dentry *last_qual;
324 struct dentry *last_noise;
325 struct dentry *wep_weak_iv_count;
326 bool add_has_run; 316 bool add_has_run;
327 } debugfs; 317 } debugfs;
328#endif 318#endif
@@ -419,8 +409,8 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
419/* 409/*
420 * Get STA info by index, BROKEN! 410 * Get STA info by index, BROKEN!
421 */ 411 */
422struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, 412struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
423 struct net_device *dev); 413 int idx);
424/* 414/*
425 * Create a new STA info, caller owns returned structure 415 * Create a new STA info, caller owns returned structure
426 * until sta_info_insert(). 416 * until sta_info_insert().
@@ -454,4 +444,7 @@ int sta_info_flush(struct ieee80211_local *local,
454void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 444void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
455 unsigned long exp_time); 445 unsigned long exp_time);
456 446
447void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
448void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
449
457#endif /* STA_INFO_H */ 450#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
new file mode 100644
index 000000000000..9f91fd8e6efb
--- /dev/null
+++ b/net/mac80211/status.c
@@ -0,0 +1,337 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <net/mac80211.h>
13#include "ieee80211_i.h"
14#include "rate.h"
15#include "mesh.h"
16#include "led.h"
17
18
19void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
20 struct sk_buff *skb)
21{
22 struct ieee80211_local *local = hw_to_local(hw);
23 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
24 int tmp;
25
26 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
27 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
28 &local->skb_queue : &local->skb_queue_unreliable, skb);
29 tmp = skb_queue_len(&local->skb_queue) +
30 skb_queue_len(&local->skb_queue_unreliable);
31 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
32 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
33 dev_kfree_skb_irq(skb);
34 tmp--;
35 I802_DEBUG_INC(local->tx_status_drop);
36 }
37 tasklet_schedule(&local->tasklet);
38}
39EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
40
41static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
42 struct sta_info *sta,
43 struct sk_buff *skb)
44{
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
46
47 /*
48 * XXX: This is temporary!
49 *
50 * The problem here is that when we get here, the driver will
51 * quite likely have pretty much overwritten info->control by
52 * using info->driver_data or info->rate_driver_data. Thus,
53 * when passing out the frame to the driver again, we would be
54 * passing completely bogus data since the driver would then
55 * expect a properly filled info->control. In mac80211 itself
56 * the same problem occurs, since we need info->control.vif
57 * internally.
58 *
59 * To fix this, we should send the frame through TX processing
60 * again. However, it's not that simple, since the frame will
61 * have been software-encrypted (if applicable) already, and
62 * encrypting it again doesn't do much good. So to properly do
63 * that, we not only have to skip the actual 'raw' encryption
64 * (key selection etc. still has to be done!) but also the
65 * sequence number assignment since that impacts the crypto
66 * encapsulation, of course.
67 *
68 * Hence, for now, fix the bug by just dropping the frame.
69 */
70 goto drop;
71
72 sta->tx_filtered_count++;
73
74 /*
75 * Clear the TX filter mask for this STA when sending the next
76 * packet. If the STA went to power save mode, this will happen
77 * when it wakes up for the next time.
78 */
79 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
80
81 /*
82 * This code races in the following way:
83 *
84 * (1) STA sends frame indicating it will go to sleep and does so
85 * (2) hardware/firmware adds STA to filter list, passes frame up
86 * (3) hardware/firmware processes TX fifo and suppresses a frame
87 * (4) we get TX status before having processed the frame and
88 * knowing that the STA has gone to sleep.
89 *
90 * This is actually quite unlikely even when both those events are
91 * processed from interrupts coming in quickly after one another or
92 * even at the same time because we queue both TX status events and
93 * RX frames to be processed by a tasklet and process them in the
94 * same order that they were received or TX status last. Hence, there
95 * is no race as long as the frame RX is processed before the next TX
96 * status, which drivers can ensure, see below.
97 *
98 * Note that this can only happen if the hardware or firmware can
99 * actually add STAs to the filter list, if this is done by the
100 * driver in response to set_tim() (which will only reduce the race
101 * this whole filtering tries to solve, not completely solve it)
102 * this situation cannot happen.
103 *
104 * To completely solve this race drivers need to make sure that they
105 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
106 * functions and
107 * (b) always process RX events before TX status events if ordering
108 * can be unknown, for example with different interrupt status
109 * bits.
110 */
111 if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
112 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
113 skb_queue_tail(&sta->tx_filtered, skb);
114 return;
115 }
116
117 if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
118 !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
119 /* Software retry the packet once */
120 info->flags |= IEEE80211_TX_INTFL_RETRIED;
121 ieee80211_add_pending_skb(local, skb);
122 return;
123 }
124
125 drop:
126#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
127 if (net_ratelimit())
128 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
129 "queue_len=%d PS=%d @%lu\n",
130 wiphy_name(local->hw.wiphy),
131 skb_queue_len(&sta->tx_filtered),
132 !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
133#endif
134 dev_kfree_skb(skb);
135}
136
137void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
138{
139 struct sk_buff *skb2;
140 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
141 struct ieee80211_local *local = hw_to_local(hw);
142 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
143 u16 frag, type;
144 __le16 fc;
145 struct ieee80211_supported_band *sband;
146 struct ieee80211_tx_status_rtap_hdr *rthdr;
147 struct ieee80211_sub_if_data *sdata;
148 struct net_device *prev_dev = NULL;
149 struct sta_info *sta;
150 int retry_count = -1, i;
151
152 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
153 /* the HW cannot have attempted that rate */
154 if (i >= hw->max_rates) {
155 info->status.rates[i].idx = -1;
156 info->status.rates[i].count = 0;
157 }
158
159 retry_count += info->status.rates[i].count;
160 }
161 if (retry_count < 0)
162 retry_count = 0;
163
164 rcu_read_lock();
165
166 sband = local->hw.wiphy->bands[info->band];
167
168 sta = sta_info_get(local, hdr->addr1);
169
170 if (sta) {
171 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
172 test_sta_flags(sta, WLAN_STA_PS_STA)) {
173 /*
174 * The STA is in power save mode, so assume
175 * that this TX packet failed because of that.
176 */
177 ieee80211_handle_filtered_frame(local, sta, skb);
178 rcu_read_unlock();
179 return;
180 }
181
182 fc = hdr->frame_control;
183
184 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
185 (ieee80211_is_data_qos(fc))) {
186 u16 tid, ssn;
187 u8 *qc;
188
189 qc = ieee80211_get_qos_ctl(hdr);
190 tid = qc[0] & 0xf;
191 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
192 & IEEE80211_SCTL_SEQ);
193 ieee80211_send_bar(sta->sdata, hdr->addr1,
194 tid, ssn);
195 }
196
197 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
198 ieee80211_handle_filtered_frame(local, sta, skb);
199 rcu_read_unlock();
200 return;
201 } else {
202 if (!(info->flags & IEEE80211_TX_STAT_ACK))
203 sta->tx_retry_failed++;
204 sta->tx_retry_count += retry_count;
205 }
206
207 rate_control_tx_status(local, sband, sta, skb);
208 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
209 ieee80211s_update_metric(local, sta, skb);
210 }
211
212 rcu_read_unlock();
213
214 ieee80211_led_tx(local, 0);
215
216 /* SNMP counters
217 * Fragments are passed to low-level drivers as separate skbs, so these
218 * are actually fragments, not frames. Update frame counters only for
219 * the first fragment of the frame. */
220
221 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
222 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
223
224 if (info->flags & IEEE80211_TX_STAT_ACK) {
225 if (frag == 0) {
226 local->dot11TransmittedFrameCount++;
227 if (is_multicast_ether_addr(hdr->addr1))
228 local->dot11MulticastTransmittedFrameCount++;
229 if (retry_count > 0)
230 local->dot11RetryCount++;
231 if (retry_count > 1)
232 local->dot11MultipleRetryCount++;
233 }
234
235 /* This counter shall be incremented for an acknowledged MPDU
236 * with an individual address in the address 1 field or an MPDU
237 * with a multicast address in the address 1 field of type Data
238 * or Management. */
239 if (!is_multicast_ether_addr(hdr->addr1) ||
240 type == IEEE80211_FTYPE_DATA ||
241 type == IEEE80211_FTYPE_MGMT)
242 local->dot11TransmittedFragmentCount++;
243 } else {
244 if (frag == 0)
245 local->dot11FailedCount++;
246 }
247
248 /* this was a transmitted frame, but now we want to reuse it */
249 skb_orphan(skb);
250
251 /*
252 * This is a bit racy but we can avoid a lot of work
253 * with this test...
254 */
255 if (!local->monitors && !local->cooked_mntrs) {
256 dev_kfree_skb(skb);
257 return;
258 }
259
260 /* send frame to monitor interfaces now */
261
262 if (skb_headroom(skb) < sizeof(*rthdr)) {
263 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
264 dev_kfree_skb(skb);
265 return;
266 }
267
268 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
269 skb_push(skb, sizeof(*rthdr));
270
271 memset(rthdr, 0, sizeof(*rthdr));
272 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
273 rthdr->hdr.it_present =
274 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
275 (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
276 (1 << IEEE80211_RADIOTAP_RATE));
277
278 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
279 !is_multicast_ether_addr(hdr->addr1))
280 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
281
282 /*
283 * XXX: Once radiotap gets the bitmap reset thing the vendor
284 * extensions proposal contains, we can actually report
285 * the whole set of tries we did.
286 */
287 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
288 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
289 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
290 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
291 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
292 if (info->status.rates[0].idx >= 0 &&
293 !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
294 rthdr->rate = sband->bitrates[
295 info->status.rates[0].idx].bitrate / 5;
296
297 /* for now report the total retry_count */
298 rthdr->data_retries = retry_count;
299
300 /* XXX: is this sufficient for BPF? */
301 skb_set_mac_header(skb, 0);
302 skb->ip_summed = CHECKSUM_UNNECESSARY;
303 skb->pkt_type = PACKET_OTHERHOST;
304 skb->protocol = htons(ETH_P_802_2);
305 memset(skb->cb, 0, sizeof(skb->cb));
306
307 rcu_read_lock();
308 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
309 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
310 if (!netif_running(sdata->dev))
311 continue;
312
313 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
314 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
315 (type == IEEE80211_FTYPE_DATA))
316 continue;
317
318 if (prev_dev) {
319 skb2 = skb_clone(skb, GFP_ATOMIC);
320 if (skb2) {
321 skb2->dev = prev_dev;
322 netif_rx(skb2);
323 }
324 }
325
326 prev_dev = sdata->dev;
327 }
328 }
329 if (prev_dev) {
330 skb->dev = prev_dev;
331 netif_rx(skb);
332 skb = NULL;
333 }
334 rcu_read_unlock();
335 dev_kfree_skb(skb);
336}
337EXPORT_SYMBOL(ieee80211_tx_status);
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 964b7faa7f17..4921d724b6c7 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -301,9 +301,9 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
301#endif 301#endif
302 if (key->local->ops->update_tkip_key && 302 if (key->local->ops->update_tkip_key &&
303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
304 u8 bcast[ETH_ALEN] = 304 static const u8 bcast[ETH_ALEN] =
305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
306 u8 *sta_addr = key->sta->sta.addr; 306 const u8 *sta_addr = key->sta->sta.addr;
307 307
308 if (is_multicast_ether_addr(ra)) 308 if (is_multicast_ether_addr(ra))
309 sta_addr = bcast; 309 sta_addr = bcast;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index db4bda681ec9..8834cc93c716 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -317,12 +317,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
317 if (!atomic_read(&tx->sdata->bss->num_sta_ps)) 317 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
318 return TX_CONTINUE; 318 return TX_CONTINUE;
319 319
320 /* buffered in hardware */ 320 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
321 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) {
322 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
323 321
322 /* device releases frame after DTIM beacon */
323 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
324 return TX_CONTINUE; 324 return TX_CONTINUE;
325 }
326 325
327 /* buffered in mac80211 */ 326 /* buffered in mac80211 */
328 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 327 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -367,15 +366,16 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 366 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
368 u32 staflags; 367 u32 staflags;
369 368
370 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control) 369 if (unlikely(!sta ||
371 || ieee80211_is_auth(hdr->frame_control) 370 ieee80211_is_probe_resp(hdr->frame_control) ||
372 || ieee80211_is_assoc_resp(hdr->frame_control) 371 ieee80211_is_auth(hdr->frame_control) ||
373 || ieee80211_is_reassoc_resp(hdr->frame_control))) 372 ieee80211_is_assoc_resp(hdr->frame_control) ||
373 ieee80211_is_reassoc_resp(hdr->frame_control)))
374 return TX_CONTINUE; 374 return TX_CONTINUE;
375 375
376 staflags = get_sta_flags(sta); 376 staflags = get_sta_flags(sta);
377 377
378 if (unlikely((staflags & WLAN_STA_PS) && 378 if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
379 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) { 379 !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
380#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 380#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
381 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " 381 printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
@@ -398,8 +398,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
398 } else 398 } else
399 tx->local->total_ps_buffered++; 399 tx->local->total_ps_buffered++;
400 400
401 /* Queue frame to be sent after STA sends an PS Poll frame */ 401 /*
402 if (skb_queue_empty(&sta->ps_tx_buf)) 402 * Queue frame to be sent after STA wakes up/polls,
403 * but don't set the TIM bit if the driver is blocking
404 * wakeup or poll response transmissions anyway.
405 */
406 if (skb_queue_empty(&sta->ps_tx_buf) &&
407 !(staflags & WLAN_STA_PS_DRIVER))
403 sta_info_set_tim_bit(sta); 408 sta_info_set_tim_bit(sta);
404 409
405 info->control.jiffies = jiffies; 410 info->control.jiffies = jiffies;
@@ -409,7 +414,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
409 return TX_QUEUED; 414 return TX_QUEUED;
410 } 415 }
411#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 416#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
412 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { 417 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
413 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 418 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
414 "set -> send frame\n", tx->dev->name, 419 "set -> send frame\n", tx->dev->name,
415 sta->sta.addr); 420 sta->sta.addr);
@@ -1047,7 +1052,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1047 1052
1048 hdr = (struct ieee80211_hdr *) skb->data; 1053 hdr = (struct ieee80211_hdr *) skb->data;
1049 1054
1050 tx->sta = sta_info_get(local, hdr->addr1); 1055 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1056 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1057 if (!tx->sta)
1058 tx->sta = sta_info_get(local, hdr->addr1);
1051 1059
1052 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1060 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1053 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1061 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1201,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1201 struct sk_buff *skb = tx->skb; 1209 struct sk_buff *skb = tx->skb;
1202 ieee80211_tx_result res = TX_DROP; 1210 ieee80211_tx_result res = TX_DROP;
1203 1211
1204#define CALL_TXH(txh) \ 1212#define CALL_TXH(txh) \
1205 res = txh(tx); \ 1213 do { \
1206 if (res != TX_CONTINUE) \ 1214 res = txh(tx); \
1207 goto txh_done; 1215 if (res != TX_CONTINUE) \
1208 1216 goto txh_done; \
1209 CALL_TXH(ieee80211_tx_h_check_assoc) 1217 } while (0)
1210 CALL_TXH(ieee80211_tx_h_ps_buf) 1218
1211 CALL_TXH(ieee80211_tx_h_select_key) 1219 CALL_TXH(ieee80211_tx_h_check_assoc);
1212 CALL_TXH(ieee80211_tx_h_michael_mic_add) 1220 CALL_TXH(ieee80211_tx_h_ps_buf);
1213 CALL_TXH(ieee80211_tx_h_rate_ctrl) 1221 CALL_TXH(ieee80211_tx_h_select_key);
1214 CALL_TXH(ieee80211_tx_h_misc) 1222 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1215 CALL_TXH(ieee80211_tx_h_sequence) 1223 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1216 CALL_TXH(ieee80211_tx_h_fragment) 1224 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1225 CALL_TXH(ieee80211_tx_h_misc);
1226 CALL_TXH(ieee80211_tx_h_sequence);
1227 CALL_TXH(ieee80211_tx_h_fragment);
1217 /* handlers after fragment must be aware of tx info fragmentation! */ 1228 /* handlers after fragment must be aware of tx info fragmentation! */
1218 CALL_TXH(ieee80211_tx_h_stats) 1229 CALL_TXH(ieee80211_tx_h_stats);
1219 CALL_TXH(ieee80211_tx_h_encrypt) 1230 CALL_TXH(ieee80211_tx_h_encrypt);
1220 CALL_TXH(ieee80211_tx_h_calculate_duration) 1231 CALL_TXH(ieee80211_tx_h_calculate_duration);
1221#undef CALL_TXH 1232#undef CALL_TXH
1222 1233
1223 txh_done: 1234 txh_done:
@@ -1387,6 +1398,30 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1387 return 0; 1398 return 0;
1388} 1399}
1389 1400
1401static bool need_dynamic_ps(struct ieee80211_local *local)
1402{
1403 /* driver doesn't support power save */
1404 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1405 return false;
1406
1407 /* hardware does dynamic power save */
1408 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1409 return false;
1410
1411 /* dynamic power save disabled */
1412 if (local->hw.conf.dynamic_ps_timeout <= 0)
1413 return false;
1414
1415 /* we are scanning, don't enable power save */
1416 if (local->scanning)
1417 return false;
1418
1419 if (!local->ps_sdata)
1420 return false;
1421
1422 return true;
1423}
1424
1390static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1425static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1391 struct sk_buff *skb) 1426 struct sk_buff *skb)
1392{ 1427{
@@ -1397,11 +1432,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1397 int headroom; 1432 int headroom;
1398 bool may_encrypt; 1433 bool may_encrypt;
1399 1434
1400 dev_hold(sdata->dev); 1435 if (need_dynamic_ps(local)) {
1401
1402 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
1403 local->hw.conf.dynamic_ps_timeout > 0 &&
1404 !(local->scanning) && local->ps_sdata) {
1405 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 1436 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1406 ieee80211_stop_queues_by_reason(&local->hw, 1437 ieee80211_stop_queues_by_reason(&local->hw,
1407 IEEE80211_QUEUE_STOP_REASON_PS); 1438 IEEE80211_QUEUE_STOP_REASON_PS);
@@ -1413,7 +1444,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1413 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 1444 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1414 } 1445 }
1415 1446
1416 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 1447 rcu_read_lock();
1417 1448
1418 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { 1449 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1419 int hdrlen; 1450 int hdrlen;
@@ -1437,7 +1468,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1437 * support we will need a different mechanism. 1468 * support we will need a different mechanism.
1438 */ 1469 */
1439 1470
1440 rcu_read_lock();
1441 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, 1471 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1442 list) { 1472 list) {
1443 if (!netif_running(tmp_sdata->dev)) 1473 if (!netif_running(tmp_sdata->dev))
@@ -1445,14 +1475,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1445 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1475 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1446 continue; 1476 continue;
1447 if (compare_ether_addr(tmp_sdata->dev->dev_addr, 1477 if (compare_ether_addr(tmp_sdata->dev->dev_addr,
1448 hdr->addr2)) { 1478 hdr->addr2) == 0) {
1449 dev_hold(tmp_sdata->dev);
1450 dev_put(sdata->dev);
1451 sdata = tmp_sdata; 1479 sdata = tmp_sdata;
1452 break; 1480 break;
1453 } 1481 }
1454 } 1482 }
1455 rcu_read_unlock();
1456 } 1483 }
1457 } 1484 }
1458 1485
@@ -1466,7 +1493,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1466 1493
1467 if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) { 1494 if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
1468 dev_kfree_skb(skb); 1495 dev_kfree_skb(skb);
1469 dev_put(sdata->dev); 1496 rcu_read_unlock();
1470 return; 1497 return;
1471 } 1498 }
1472 1499
@@ -1477,13 +1504,13 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1477 !is_multicast_ether_addr(hdr->addr1)) 1504 !is_multicast_ether_addr(hdr->addr1))
1478 if (mesh_nexthop_lookup(skb, sdata)) { 1505 if (mesh_nexthop_lookup(skb, sdata)) {
1479 /* skb queued: don't free */ 1506 /* skb queued: don't free */
1480 dev_put(sdata->dev); 1507 rcu_read_unlock();
1481 return; 1508 return;
1482 } 1509 }
1483 1510
1484 ieee80211_select_queue(local, skb); 1511 ieee80211_select_queue(local, skb);
1485 ieee80211_tx(sdata, skb, false); 1512 ieee80211_tx(sdata, skb, false);
1486 dev_put(sdata->dev); 1513 rcu_read_unlock();
1487} 1514}
1488 1515
1489netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, 1516netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1547,6 +1574,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1547 1574
1548 memset(info, 0, sizeof(*info)); 1575 memset(info, 0, sizeof(*info));
1549 1576
1577 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1578
1550 /* pass the radiotap header up to xmit */ 1579 /* pass the radiotap header up to xmit */
1551 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1580 ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1552 return NETDEV_TX_OK; 1581 return NETDEV_TX_OK;
@@ -1585,7 +1614,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1585 const u8 *encaps_data; 1614 const u8 *encaps_data;
1586 int encaps_len, skip_header_bytes; 1615 int encaps_len, skip_header_bytes;
1587 int nh_pos, h_pos; 1616 int nh_pos, h_pos;
1588 struct sta_info *sta; 1617 struct sta_info *sta = NULL;
1589 u32 sta_flags = 0; 1618 u32 sta_flags = 0;
1590 1619
1591 if (unlikely(skb->len < ETH_HLEN)) { 1620 if (unlikely(skb->len < ETH_HLEN)) {
@@ -1602,8 +1631,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1602 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); 1631 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1603 1632
1604 switch (sdata->vif.type) { 1633 switch (sdata->vif.type) {
1605 case NL80211_IFTYPE_AP:
1606 case NL80211_IFTYPE_AP_VLAN: 1634 case NL80211_IFTYPE_AP_VLAN:
1635 rcu_read_lock();
1636 sta = rcu_dereference(sdata->u.vlan.sta);
1637 if (sta) {
1638 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1639 /* RA TA DA SA */
1640 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1641 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1642 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1643 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1644 hdrlen = 30;
1645 sta_flags = get_sta_flags(sta);
1646 }
1647 rcu_read_unlock();
1648 if (sta)
1649 break;
1650 /* fall through */
1651 case NL80211_IFTYPE_AP:
1607 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1652 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1608 /* DA BSSID SA */ 1653 /* DA BSSID SA */
1609 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1654 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1639,21 +1684,25 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1639 /* packet from other interface */ 1684 /* packet from other interface */
1640 struct mesh_path *mppath; 1685 struct mesh_path *mppath;
1641 int is_mesh_mcast = 1; 1686 int is_mesh_mcast = 1;
1642 char *mesh_da; 1687 const u8 *mesh_da;
1643 1688
1644 rcu_read_lock(); 1689 rcu_read_lock();
1645 if (is_multicast_ether_addr(skb->data)) 1690 if (is_multicast_ether_addr(skb->data))
1646 /* DA TA mSA AE:SA */ 1691 /* DA TA mSA AE:SA */
1647 mesh_da = skb->data; 1692 mesh_da = skb->data;
1648 else { 1693 else {
1694 static const u8 bcast[ETH_ALEN] =
1695 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1696
1649 mppath = mpp_path_lookup(skb->data, sdata); 1697 mppath = mpp_path_lookup(skb->data, sdata);
1650 if (mppath) { 1698 if (mppath) {
1651 /* RA TA mDA mSA AE:DA SA */ 1699 /* RA TA mDA mSA AE:DA SA */
1652 mesh_da = mppath->mpp; 1700 mesh_da = mppath->mpp;
1653 is_mesh_mcast = 0; 1701 is_mesh_mcast = 0;
1654 } else 1702 } else {
1655 /* DA TA mSA AE:SA */ 1703 /* DA TA mSA AE:SA */
1656 mesh_da = dev->broadcast; 1704 mesh_da = bcast;
1705 }
1657 } 1706 }
1658 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1707 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1659 mesh_da, dev->dev_addr); 1708 mesh_da, dev->dev_addr);
@@ -1677,12 +1726,21 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1677 break; 1726 break;
1678#endif 1727#endif
1679 case NL80211_IFTYPE_STATION: 1728 case NL80211_IFTYPE_STATION:
1680 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1681 /* BSSID SA DA */
1682 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); 1729 memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
1683 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1730 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1684 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1731 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1685 hdrlen = 24; 1732 /* RA TA DA SA */
1733 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1734 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1735 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1736 hdrlen = 30;
1737 } else {
1738 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1739 /* BSSID SA DA */
1740 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1741 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1742 hdrlen = 24;
1743 }
1686 break; 1744 break;
1687 case NL80211_IFTYPE_ADHOC: 1745 case NL80211_IFTYPE_ADHOC:
1688 /* DA SA BSSID */ 1746 /* DA SA BSSID */
@@ -1907,12 +1965,10 @@ void ieee80211_tx_pending(unsigned long data)
1907 } 1965 }
1908 1966
1909 sdata = vif_to_sdata(info->control.vif); 1967 sdata = vif_to_sdata(info->control.vif);
1910 dev_hold(sdata->dev);
1911 spin_unlock_irqrestore(&local->queue_stop_reason_lock, 1968 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1912 flags); 1969 flags);
1913 1970
1914 txok = ieee80211_tx_pending_skb(local, skb); 1971 txok = ieee80211_tx_pending_skb(local, skb);
1915 dev_put(sdata->dev);
1916 if (!txok) 1972 if (!txok)
1917 __skb_queue_head(&local->pending[i], skb); 1973 __skb_queue_head(&local->pending[i], skb);
1918 spin_lock_irqsave(&local->queue_stop_reason_lock, 1974 spin_lock_irqsave(&local->queue_stop_reason_lock,
@@ -1990,8 +2046,9 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
1990 } 2046 }
1991} 2047}
1992 2048
1993struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 2049struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
1994 struct ieee80211_vif *vif) 2050 struct ieee80211_vif *vif,
2051 u16 *tim_offset, u16 *tim_length)
1995{ 2052{
1996 struct ieee80211_local *local = hw_to_local(hw); 2053 struct ieee80211_local *local = hw_to_local(hw);
1997 struct sk_buff *skb = NULL; 2054 struct sk_buff *skb = NULL;
@@ -2008,6 +2065,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2008 2065
2009 sdata = vif_to_sdata(vif); 2066 sdata = vif_to_sdata(vif);
2010 2067
2068 if (tim_offset)
2069 *tim_offset = 0;
2070 if (tim_length)
2071 *tim_length = 0;
2072
2011 if (sdata->vif.type == NL80211_IFTYPE_AP) { 2073 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2012 ap = &sdata->u.ap; 2074 ap = &sdata->u.ap;
2013 beacon = rcu_dereference(ap->beacon); 2075 beacon = rcu_dereference(ap->beacon);
@@ -2043,6 +2105,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2043 spin_unlock_irqrestore(&local->sta_lock, flags); 2105 spin_unlock_irqrestore(&local->sta_lock, flags);
2044 } 2106 }
2045 2107
2108 if (tim_offset)
2109 *tim_offset = beacon->head_len;
2110 if (tim_length)
2111 *tim_length = skb->len - beacon->head_len;
2112
2046 if (beacon->tail) 2113 if (beacon->tail)
2047 memcpy(skb_put(skb, beacon->tail_len), 2114 memcpy(skb_put(skb, beacon->tail_len),
2048 beacon->tail, beacon->tail_len); 2115 beacon->tail, beacon->tail_len);
@@ -2080,7 +2147,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2080 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2147 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2081 memset(mgmt->da, 0xff, ETH_ALEN); 2148 memset(mgmt->da, 0xff, ETH_ALEN);
2082 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2149 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2083 /* BSSID is left zeroed, wildcard value */ 2150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
2084 mgmt->u.beacon.beacon_int = 2151 mgmt->u.beacon.beacon_int =
2085 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2152 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2086 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2153 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2119,7 +2186,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2119 rcu_read_unlock(); 2186 rcu_read_unlock();
2120 return skb; 2187 return skb;
2121} 2188}
2122EXPORT_SYMBOL(ieee80211_beacon_get); 2189EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2123 2190
2124void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2191void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2125 const void *frame, size_t frame_len, 2192 const void *frame, size_t frame_len,
@@ -2214,17 +2281,12 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2214} 2281}
2215EXPORT_SYMBOL(ieee80211_get_buffered_bc); 2282EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2216 2283
2217void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, 2284void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2218 int encrypt)
2219{ 2285{
2220 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2221 skb_set_mac_header(skb, 0); 2286 skb_set_mac_header(skb, 0);
2222 skb_set_network_header(skb, 0); 2287 skb_set_network_header(skb, 0);
2223 skb_set_transport_header(skb, 0); 2288 skb_set_transport_header(skb, 0);
2224 2289
2225 if (!encrypt)
2226 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2227
2228 /* 2290 /*
2229 * The other path calling ieee80211_xmit is from the tasklet, 2291 * The other path calling ieee80211_xmit is from the tasklet,
2230 * and while we can handle concurrent transmissions locking 2292 * and while we can handle concurrent transmissions locking
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index aeb65b3d2295..d09f78bb2442 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
520 */ 520 */
521static bool ieee80211_can_queue_work(struct ieee80211_local *local) 521static bool ieee80211_can_queue_work(struct ieee80211_local *local)
522{ 522{
523 if (WARN(local->suspended, "queueing ieee80211 work while " 523 if (WARN(local->suspended && !local->resuming,
524 "going to suspend\n")) 524 "queueing ieee80211 work while going to suspend\n"))
525 return false; 525 return false;
526 526
527 return true; 527 return true;
528} 528}
@@ -666,8 +666,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
666 elems->mesh_id_len = elen; 666 elems->mesh_id_len = elen;
667 break; 667 break;
668 case WLAN_EID_MESH_CONFIG: 668 case WLAN_EID_MESH_CONFIG:
669 elems->mesh_config = pos; 669 if (elen >= sizeof(struct ieee80211_meshconf_ie))
670 elems->mesh_config_len = elen; 670 elems->mesh_config = (void *)pos;
671 break; 671 break;
672 case WLAN_EID_PEER_LINK: 672 case WLAN_EID_PEER_LINK:
673 elems->peer_link = pos; 673 elems->peer_link = pos;
@@ -685,6 +685,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
685 elems->perr = pos; 685 elems->perr = pos;
686 elems->perr_len = elen; 686 elems->perr_len = elen;
687 break; 687 break;
688 case WLAN_EID_RANN:
689 if (elen >= sizeof(struct ieee80211_rann_ie))
690 elems->rann = (void *)pos;
691 break;
688 case WLAN_EID_CHANNEL_SWITCH: 692 case WLAN_EID_CHANNEL_SWITCH:
689 elems->ch_switch_elem = pos; 693 elems->ch_switch_elem = pos;
690 elems->ch_switch_elem_len = elen; 694 elems->ch_switch_elem_len = elen;
@@ -868,17 +872,19 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
868 WARN_ON(err); 872 WARN_ON(err);
869 } 873 }
870 874
871 ieee80211_tx_skb(sdata, skb, 0); 875 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
876 ieee80211_tx_skb(sdata, skb);
872} 877}
873 878
874int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 879int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
875 const u8 *ie, size_t ie_len) 880 const u8 *ie, size_t ie_len,
881 enum ieee80211_band band)
876{ 882{
877 struct ieee80211_supported_band *sband; 883 struct ieee80211_supported_band *sband;
878 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; 884 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
879 int i; 885 int i;
880 886
881 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 887 sband = local->hw.wiphy->bands[band];
882 888
883 pos = buffer; 889 pos = buffer;
884 890
@@ -966,9 +972,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
966 memcpy(pos, ssid, ssid_len); 972 memcpy(pos, ssid, ssid_len);
967 pos += ssid_len; 973 pos += ssid_len;
968 974
969 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len)); 975 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
976 local->hw.conf.channel->band));
970 977
971 ieee80211_tx_skb(sdata, skb, 0); 978 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
979 ieee80211_tx_skb(sdata, skb);
972} 980}
973 981
974u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 982u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1025,13 +1033,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1025 struct sta_info *sta; 1033 struct sta_info *sta;
1026 unsigned long flags; 1034 unsigned long flags;
1027 int res; 1035 int res;
1028 bool from_suspend = local->suspended;
1029 1036
1030 /* 1037 if (local->suspended)
1031 * We're going to start the hardware, at that point 1038 local->resuming = true;
1032 * we are no longer suspended and can RX frames.
1033 */
1034 local->suspended = false;
1035 1039
1036 /* restart hardware */ 1040 /* restart hardware */
1037 if (local->open_count) { 1041 if (local->open_count) {
@@ -1129,11 +1133,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1129 * If this is for hw restart things are still running. 1133 * If this is for hw restart things are still running.
1130 * We may want to change that later, however. 1134 * We may want to change that later, however.
1131 */ 1135 */
1132 if (!from_suspend) 1136 if (!local->suspended)
1133 return 0; 1137 return 0;
1134 1138
1135#ifdef CONFIG_PM 1139#ifdef CONFIG_PM
1140 /* first set suspended false, then resuming */
1136 local->suspended = false; 1141 local->suspended = false;
1142 mb();
1143 local->resuming = false;
1137 1144
1138 list_for_each_entry(sdata, &local->interfaces, list) { 1145 list_for_each_entry(sdata, &local->interfaces, list) {
1139 switch(sdata->vif.type) { 1146 switch(sdata->vif.type) {
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 8a980f136941..247123fe1a7a 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -281,16 +281,18 @@ bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
281ieee80211_rx_result 281ieee80211_rx_result
282ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) 282ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
283{ 283{
284 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 284 struct sk_buff *skb = rx->skb;
285 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
286 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
285 287
286 if (!ieee80211_is_data(hdr->frame_control) && 288 if (!ieee80211_is_data(hdr->frame_control) &&
287 !ieee80211_is_auth(hdr->frame_control)) 289 !ieee80211_is_auth(hdr->frame_control))
288 return RX_CONTINUE; 290 return RX_CONTINUE;
289 291
290 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 292 if (!(status->flag & RX_FLAG_DECRYPTED)) {
291 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) 293 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
292 return RX_DROP_UNUSABLE; 294 return RX_DROP_UNUSABLE;
293 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 295 } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
294 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 296 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
295 /* remove ICV */ 297 /* remove ICV */
296 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); 298 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 70778694877b..5332014cb229 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -85,16 +85,16 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
85 u8 *data, *key = NULL, key_offset; 85 u8 *data, *key = NULL, key_offset;
86 size_t data_len; 86 size_t data_len;
87 unsigned int hdrlen; 87 unsigned int hdrlen;
88 struct ieee80211_hdr *hdr;
89 u8 mic[MICHAEL_MIC_LEN]; 88 u8 mic[MICHAEL_MIC_LEN];
90 struct sk_buff *skb = rx->skb; 89 struct sk_buff *skb = rx->skb;
90 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
91 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
91 int authenticator = 1, wpa_test = 0; 92 int authenticator = 1, wpa_test = 0;
92 93
93 /* No way to verify the MIC if the hardware stripped it */ 94 /* No way to verify the MIC if the hardware stripped it */
94 if (rx->status->flag & RX_FLAG_MMIC_STRIPPED) 95 if (status->flag & RX_FLAG_MMIC_STRIPPED)
95 return RX_CONTINUE; 96 return RX_CONTINUE;
96 97
97 hdr = (struct ieee80211_hdr *)skb->data;
98 if (!rx->key || rx->key->conf.alg != ALG_TKIP || 98 if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
99 !ieee80211_has_protected(hdr->frame_control) || 99 !ieee80211_has_protected(hdr->frame_control) ||
100 !ieee80211_is_data_present(hdr->frame_control)) 100 !ieee80211_is_data_present(hdr->frame_control))
@@ -216,6 +216,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
216 int hdrlen, res, hwaccel = 0, wpa_test = 0; 216 int hdrlen, res, hwaccel = 0, wpa_test = 0;
217 struct ieee80211_key *key = rx->key; 217 struct ieee80211_key *key = rx->key;
218 struct sk_buff *skb = rx->skb; 218 struct sk_buff *skb = rx->skb;
219 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
219 220
220 hdrlen = ieee80211_hdrlen(hdr->frame_control); 221 hdrlen = ieee80211_hdrlen(hdr->frame_control);
221 222
@@ -225,8 +226,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
225 if (!rx->sta || skb->len - hdrlen < 12) 226 if (!rx->sta || skb->len - hdrlen < 12)
226 return RX_DROP_UNUSABLE; 227 return RX_DROP_UNUSABLE;
227 228
228 if (rx->status->flag & RX_FLAG_DECRYPTED) { 229 if (status->flag & RX_FLAG_DECRYPTED) {
229 if (rx->status->flag & RX_FLAG_IV_STRIPPED) { 230 if (status->flag & RX_FLAG_IV_STRIPPED) {
230 /* 231 /*
231 * Hardware took care of all processing, including 232 * Hardware took care of all processing, including
232 * replay protection, and stripped the ICV/IV so 233 * replay protection, and stripped the ICV/IV so
@@ -442,6 +443,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
442 int hdrlen; 443 int hdrlen;
443 struct ieee80211_key *key = rx->key; 444 struct ieee80211_key *key = rx->key;
444 struct sk_buff *skb = rx->skb; 445 struct sk_buff *skb = rx->skb;
446 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
445 u8 pn[CCMP_PN_LEN]; 447 u8 pn[CCMP_PN_LEN];
446 int data_len; 448 int data_len;
447 449
@@ -455,8 +457,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
455 if (!rx->sta || data_len < 0) 457 if (!rx->sta || data_len < 0)
456 return RX_DROP_UNUSABLE; 458 return RX_DROP_UNUSABLE;
457 459
458 if ((rx->status->flag & RX_FLAG_DECRYPTED) && 460 if ((status->flag & RX_FLAG_DECRYPTED) &&
459 (rx->status->flag & RX_FLAG_IV_STRIPPED)) 461 (status->flag & RX_FLAG_IV_STRIPPED))
460 return RX_CONTINUE; 462 return RX_CONTINUE;
461 463
462 ccmp_hdr2pn(pn, skb->data + hdrlen); 464 ccmp_hdr2pn(pn, skb->data + hdrlen);
@@ -466,7 +468,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
466 return RX_DROP_UNUSABLE; 468 return RX_DROP_UNUSABLE;
467 } 469 }
468 470
469 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 471 if (!(status->flag & RX_FLAG_DECRYPTED)) {
470 /* hardware didn't decrypt/verify MIC */ 472 /* hardware didn't decrypt/verify MIC */
471 ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); 473 ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1);
472 474
@@ -563,6 +565,7 @@ ieee80211_rx_result
563ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) 565ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
564{ 566{
565 struct sk_buff *skb = rx->skb; 567 struct sk_buff *skb = rx->skb;
568 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
566 struct ieee80211_key *key = rx->key; 569 struct ieee80211_key *key = rx->key;
567 struct ieee80211_mmie *mmie; 570 struct ieee80211_mmie *mmie;
568 u8 aad[20], mic[8], ipn[6]; 571 u8 aad[20], mic[8], ipn[6];
@@ -571,8 +574,8 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
571 if (!ieee80211_is_mgmt(hdr->frame_control)) 574 if (!ieee80211_is_mgmt(hdr->frame_control))
572 return RX_CONTINUE; 575 return RX_CONTINUE;
573 576
574 if ((rx->status->flag & RX_FLAG_DECRYPTED) && 577 if ((status->flag & RX_FLAG_DECRYPTED) &&
575 (rx->status->flag & RX_FLAG_IV_STRIPPED)) 578 (status->flag & RX_FLAG_IV_STRIPPED))
576 return RX_CONTINUE; 579 return RX_CONTINUE;
577 580
578 if (skb->len < 24 + sizeof(*mmie)) 581 if (skb->len < 24 + sizeof(*mmie))
@@ -591,7 +594,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
591 return RX_DROP_UNUSABLE; 594 return RX_DROP_UNUSABLE;
592 } 595 }
593 596
594 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 597 if (!(status->flag & RX_FLAG_DECRYPTED)) {
595 /* hardware didn't decrypt/verify MIC */ 598 /* hardware didn't decrypt/verify MIC */
596 bip_aad(skb, aad); 599 bip_aad(skb, aad);
597 ieee80211_aes_cmac(key->u.aes_cmac.tfm, 600 ieee80211_aes_cmac(key->u.aes_cmac.tfm,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8e572d7c08c5..0e98c3282d42 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/sched.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
19#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
@@ -1356,6 +1357,11 @@ err_stat:
1356 return ret; 1357 return ret;
1357} 1358}
1358 1359
1360s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1361 enum ip_conntrack_dir dir,
1362 u32 seq);
1363EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
1364
1359int nf_conntrack_init(struct net *net) 1365int nf_conntrack_init(struct net *net)
1360{ 1366{
1361 int ret; 1367 int ret;
@@ -1373,6 +1379,9 @@ int nf_conntrack_init(struct net *net)
1373 /* For use by REJECT target */ 1379 /* For use by REJECT target */
1374 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); 1380 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
1375 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); 1381 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
1382
1383 /* Howto get NAT offsets */
1384 rcu_assign_pointer(nf_ct_nat_offset, NULL);
1376 } 1385 }
1377 return 0; 1386 return 0;
1378 1387
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 2032dfe25ca8..fdf5d2a1d9b4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -202,9 +202,9 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
202static inline int expect_matches(const struct nf_conntrack_expect *a, 202static inline int expect_matches(const struct nf_conntrack_expect *a,
203 const struct nf_conntrack_expect *b) 203 const struct nf_conntrack_expect *b)
204{ 204{
205 return a->master == b->master && a->class == b->class 205 return a->master == b->master && a->class == b->class &&
206 && nf_ct_tuple_equal(&a->tuple, &b->tuple) 206 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
207 && nf_ct_tuple_mask_equal(&a->mask, &b->mask); 207 nf_ct_tuple_mask_equal(&a->mask, &b->mask);
208} 208}
209 209
210/* Generally a bad idea to call this: could have matched already. */ 210/* Generally a bad idea to call this: could have matched already. */
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 5509dd1f14cf..38ea7ef3ccd2 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -243,8 +243,8 @@ static int try_epsv_response(const char *data, size_t dlen,
243 /* Three delimiters. */ 243 /* Three delimiters. */
244 if (dlen <= 3) return 0; 244 if (dlen <= 3) return 0;
245 delim = data[0]; 245 delim = data[0];
246 if (isdigit(delim) || delim < 33 || delim > 126 246 if (isdigit(delim) || delim < 33 || delim > 126 ||
247 || data[1] != delim || data[2] != delim) 247 data[1] != delim || data[2] != delim)
248 return 0; 248 return 0;
249 249
250 return get_port(data, 3, dlen, delim, &cmd->u.tcp.port); 250 return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
@@ -366,8 +366,8 @@ static int help(struct sk_buff *skb,
366 typeof(nf_nat_ftp_hook) nf_nat_ftp; 366 typeof(nf_nat_ftp_hook) nf_nat_ftp;
367 367
368 /* Until there's been traffic both ways, don't look in packets. */ 368 /* Until there's been traffic both ways, don't look in packets. */
369 if (ctinfo != IP_CT_ESTABLISHED 369 if (ctinfo != IP_CT_ESTABLISHED &&
370 && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) { 370 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
371 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); 371 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
372 return NF_ACCEPT; 372 return NF_ACCEPT;
373 } 373 }
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 1b816a2ea813..98916ef26f5d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,7 +384,7 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
384}; 384};
385 385
386/* this module per-net specifics */ 386/* this module per-net specifics */
387static int dccp_net_id; 387static int dccp_net_id __read_mostly;
388struct dccp_net { 388struct dccp_net {
389 int dccp_loose; 389 int dccp_loose;
390 unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 390 unsigned int dccp_timeout[CT_DCCP_MAX + 1];
@@ -810,12 +810,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
810 810
811static __net_init int dccp_net_init(struct net *net) 811static __net_init int dccp_net_init(struct net *net)
812{ 812{
813 struct dccp_net *dn; 813 struct dccp_net *dn = dccp_pernet(net);
814 int err;
815
816 dn = kmalloc(sizeof(*dn), GFP_KERNEL);
817 if (!dn)
818 return -ENOMEM;
819 814
820 /* default values */ 815 /* default values */
821 dn->dccp_loose = 1; 816 dn->dccp_loose = 1;
@@ -827,16 +822,11 @@ static __net_init int dccp_net_init(struct net *net)
827 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 822 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
828 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 823 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
829 824
830 err = net_assign_generic(net, dccp_net_id, dn);
831 if (err)
832 goto out;
833
834#ifdef CONFIG_SYSCTL 825#ifdef CONFIG_SYSCTL
835 err = -ENOMEM;
836 dn->sysctl_table = kmemdup(dccp_sysctl_table, 826 dn->sysctl_table = kmemdup(dccp_sysctl_table,
837 sizeof(dccp_sysctl_table), GFP_KERNEL); 827 sizeof(dccp_sysctl_table), GFP_KERNEL);
838 if (!dn->sysctl_table) 828 if (!dn->sysctl_table)
839 goto out; 829 return -ENOMEM;
840 830
841 dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST]; 831 dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
842 dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND]; 832 dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
@@ -851,15 +841,11 @@ static __net_init int dccp_net_init(struct net *net)
851 nf_net_netfilter_sysctl_path, dn->sysctl_table); 841 nf_net_netfilter_sysctl_path, dn->sysctl_table);
852 if (!dn->sysctl_header) { 842 if (!dn->sysctl_header) {
853 kfree(dn->sysctl_table); 843 kfree(dn->sysctl_table);
854 goto out; 844 return -ENOMEM;
855 } 845 }
856#endif 846#endif
857 847
858 return 0; 848 return 0;
859
860out:
861 kfree(dn);
862 return err;
863} 849}
864 850
865static __net_exit void dccp_net_exit(struct net *net) 851static __net_exit void dccp_net_exit(struct net *net)
@@ -869,21 +855,20 @@ static __net_exit void dccp_net_exit(struct net *net)
869 unregister_net_sysctl_table(dn->sysctl_header); 855 unregister_net_sysctl_table(dn->sysctl_header);
870 kfree(dn->sysctl_table); 856 kfree(dn->sysctl_table);
871#endif 857#endif
872 kfree(dn);
873
874 net_assign_generic(net, dccp_net_id, NULL);
875} 858}
876 859
877static struct pernet_operations dccp_net_ops = { 860static struct pernet_operations dccp_net_ops = {
878 .init = dccp_net_init, 861 .init = dccp_net_init,
879 .exit = dccp_net_exit, 862 .exit = dccp_net_exit,
863 .id = &dccp_net_id,
864 .size = sizeof(struct dccp_net),
880}; 865};
881 866
882static int __init nf_conntrack_proto_dccp_init(void) 867static int __init nf_conntrack_proto_dccp_init(void)
883{ 868{
884 int err; 869 int err;
885 870
886 err = register_pernet_gen_subsys(&dccp_net_id, &dccp_net_ops); 871 err = register_pernet_subsys(&dccp_net_ops);
887 if (err < 0) 872 if (err < 0)
888 goto err1; 873 goto err1;
889 874
@@ -899,14 +884,14 @@ static int __init nf_conntrack_proto_dccp_init(void)
899err3: 884err3:
900 nf_conntrack_l4proto_unregister(&dccp_proto4); 885 nf_conntrack_l4proto_unregister(&dccp_proto4);
901err2: 886err2:
902 unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops); 887 unregister_pernet_subsys(&dccp_net_ops);
903err1: 888err1:
904 return err; 889 return err;
905} 890}
906 891
907static void __exit nf_conntrack_proto_dccp_fini(void) 892static void __exit nf_conntrack_proto_dccp_fini(void)
908{ 893{
909 unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops); 894 unregister_pernet_subsys(&dccp_net_ops);
910 nf_conntrack_l4proto_unregister(&dccp_proto6); 895 nf_conntrack_l4proto_unregister(&dccp_proto6);
911 nf_conntrack_l4proto_unregister(&dccp_proto4); 896 nf_conntrack_l4proto_unregister(&dccp_proto4);
912} 897}
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a54a0af0edba..c99cfba64ddc 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,7 +43,7 @@
43#define GRE_TIMEOUT (30 * HZ) 43#define GRE_TIMEOUT (30 * HZ)
44#define GRE_STREAM_TIMEOUT (180 * HZ) 44#define GRE_STREAM_TIMEOUT (180 * HZ)
45 45
46static int proto_gre_net_id; 46static int proto_gre_net_id __read_mostly;
47struct netns_proto_gre { 47struct netns_proto_gre {
48 rwlock_t keymap_lock; 48 rwlock_t keymap_lock;
49 struct list_head keymap_list; 49 struct list_head keymap_list;
@@ -300,32 +300,24 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
300 300
301static int proto_gre_net_init(struct net *net) 301static int proto_gre_net_init(struct net *net)
302{ 302{
303 struct netns_proto_gre *net_gre; 303 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
304 int rv;
305 304
306 net_gre = kmalloc(sizeof(struct netns_proto_gre), GFP_KERNEL);
307 if (!net_gre)
308 return -ENOMEM;
309 rwlock_init(&net_gre->keymap_lock); 305 rwlock_init(&net_gre->keymap_lock);
310 INIT_LIST_HEAD(&net_gre->keymap_list); 306 INIT_LIST_HEAD(&net_gre->keymap_list);
311 307
312 rv = net_assign_generic(net, proto_gre_net_id, net_gre); 308 return 0;
313 if (rv < 0)
314 kfree(net_gre);
315 return rv;
316} 309}
317 310
318static void proto_gre_net_exit(struct net *net) 311static void proto_gre_net_exit(struct net *net)
319{ 312{
320 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
321
322 nf_ct_gre_keymap_flush(net); 313 nf_ct_gre_keymap_flush(net);
323 kfree(net_gre);
324} 314}
325 315
326static struct pernet_operations proto_gre_net_ops = { 316static struct pernet_operations proto_gre_net_ops = {
327 .init = proto_gre_net_init, 317 .init = proto_gre_net_init,
328 .exit = proto_gre_net_exit, 318 .exit = proto_gre_net_exit,
319 .id = &proto_gre_net_id,
320 .size = sizeof(struct netns_proto_gre),
329}; 321};
330 322
331static int __init nf_ct_proto_gre_init(void) 323static int __init nf_ct_proto_gre_init(void)
@@ -335,7 +327,7 @@ static int __init nf_ct_proto_gre_init(void)
335 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4); 327 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
336 if (rv < 0) 328 if (rv < 0)
337 return rv; 329 return rv;
338 rv = register_pernet_gen_subsys(&proto_gre_net_id, &proto_gre_net_ops); 330 rv = register_pernet_subsys(&proto_gre_net_ops);
339 if (rv < 0) 331 if (rv < 0)
340 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); 332 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
341 return rv; 333 return rv;
@@ -344,7 +336,7 @@ static int __init nf_ct_proto_gre_init(void)
344static void __exit nf_ct_proto_gre_fini(void) 336static void __exit nf_ct_proto_gre_fini(void)
345{ 337{
346 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); 338 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
347 unregister_pernet_gen_subsys(proto_gre_net_id, &proto_gre_net_ops); 339 unregister_pernet_subsys(&proto_gre_net_ops);
348} 340}
349 341
350module_init(nf_ct_proto_gre_init); 342module_init(nf_ct_proto_gre_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9cc6b5cb06af..37a8c74be619 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -492,6 +492,21 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
492 } 492 }
493} 493}
494 494
495#ifdef CONFIG_NF_NAT_NEEDED
496static inline s16 nat_offset(const struct nf_conn *ct,
497 enum ip_conntrack_dir dir,
498 u32 seq)
499{
500 typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
501
502 return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
503}
504#define NAT_OFFSET(pf, ct, dir, seq) \
505 (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
506#else
507#define NAT_OFFSET(pf, ct, dir, seq) 0
508#endif
509
495static bool tcp_in_window(const struct nf_conn *ct, 510static bool tcp_in_window(const struct nf_conn *ct,
496 struct ip_ct_tcp *state, 511 struct ip_ct_tcp *state,
497 enum ip_conntrack_dir dir, 512 enum ip_conntrack_dir dir,
@@ -506,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
506 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 521 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
507 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 522 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
508 __u32 seq, ack, sack, end, win, swin; 523 __u32 seq, ack, sack, end, win, swin;
524 s16 receiver_offset;
509 bool res; 525 bool res;
510 526
511 /* 527 /*
@@ -519,11 +535,16 @@ static bool tcp_in_window(const struct nf_conn *ct,
519 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) 535 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
520 tcp_sack(skb, dataoff, tcph, &sack); 536 tcp_sack(skb, dataoff, tcph, &sack);
521 537
538 /* Take into account NAT sequence number mangling */
539 receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
540 ack -= receiver_offset;
541 sack -= receiver_offset;
542
522 pr_debug("tcp_in_window: START\n"); 543 pr_debug("tcp_in_window: START\n");
523 pr_debug("tcp_in_window: "); 544 pr_debug("tcp_in_window: ");
524 nf_ct_dump_tuple(tuple); 545 nf_ct_dump_tuple(tuple);
525 pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n", 546 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
526 seq, ack, sack, win, end); 547 seq, ack, receiver_offset, sack, receiver_offset, win, end);
527 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 548 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
528 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 549 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
529 sender->td_end, sender->td_maxend, sender->td_maxwin, 550 sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -613,8 +634,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
613 634
614 pr_debug("tcp_in_window: "); 635 pr_debug("tcp_in_window: ");
615 nf_ct_dump_tuple(tuple); 636 nf_ct_dump_tuple(tuple);
616 pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n", 637 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
617 seq, ack, sack, win, end); 638 seq, ack, receiver_offset, sack, receiver_offset, win, end);
618 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 639 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
619 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 640 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
620 sender->td_end, sender->td_maxend, sender->td_maxwin, 641 sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -700,7 +721,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
700 before(seq, sender->td_maxend + 1) ? 721 before(seq, sender->td_maxend + 1) ?
701 after(end, sender->td_end - receiver->td_maxwin - 1) ? 722 after(end, sender->td_end - receiver->td_maxwin - 1) ?
702 before(sack, receiver->td_end + 1) ? 723 before(sack, receiver->td_end + 1) ?
703 after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG" 724 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
704 : "ACK is under the lower bound (possible overly delayed ACK)" 725 : "ACK is under the lower bound (possible overly delayed ACK)"
705 : "ACK is over the upper bound (ACKed data not seen yet)" 726 : "ACK is over the upper bound (ACKed data not seen yet)"
706 : "SEQ is under the lower bound (already ACKed data retransmitted)" 727 : "SEQ is under the lower bound (already ACKed data retransmitted)"
@@ -715,39 +736,6 @@ static bool tcp_in_window(const struct nf_conn *ct,
715 return res; 736 return res;
716} 737}
717 738
718#ifdef CONFIG_NF_NAT_NEEDED
719/* Update sender->td_end after NAT successfully mangled the packet */
720/* Caller must linearize skb at tcp header. */
721void nf_conntrack_tcp_update(const struct sk_buff *skb,
722 unsigned int dataoff,
723 struct nf_conn *ct, int dir,
724 s16 offset)
725{
726 const struct tcphdr *tcph = (const void *)skb->data + dataoff;
727 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
728 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir];
729 __u32 end;
730
731 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
732
733 spin_lock_bh(&ct->lock);
734 /*
735 * We have to worry for the ack in the reply packet only...
736 */
737 if (ct->proto.tcp.seen[dir].td_end + offset == end)
738 ct->proto.tcp.seen[dir].td_end = end;
739 ct->proto.tcp.last_end = end;
740 spin_unlock_bh(&ct->lock);
741 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
742 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
743 sender->td_end, sender->td_maxend, sender->td_maxwin,
744 sender->td_scale,
745 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
746 receiver->td_scale);
747}
748EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
749#endif
750
751#define TH_FIN 0x01 739#define TH_FIN 0x01
752#define TH_SYN 0x02 740#define TH_SYN 0x02
753#define TH_RST 0x04 741#define TH_RST 0x04
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index c93494fef8ef..d65d3481919c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
128 128
129#ifdef CONFIG_PROC_FS 129#ifdef CONFIG_PROC_FS
130static void *seq_start(struct seq_file *seq, loff_t *pos) 130static void *seq_start(struct seq_file *seq, loff_t *pos)
131 __acquires(RCU)
132{ 131{
133 rcu_read_lock(); 132 mutex_lock(&nf_log_mutex);
134 133
135 if (*pos >= ARRAY_SIZE(nf_loggers)) 134 if (*pos >= ARRAY_SIZE(nf_loggers))
136 return NULL; 135 return NULL;
@@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
149} 148}
150 149
151static void seq_stop(struct seq_file *s, void *v) 150static void seq_stop(struct seq_file *s, void *v)
152 __releases(RCU)
153{ 151{
154 rcu_read_unlock(); 152 mutex_unlock(&nf_log_mutex);
155} 153}
156 154
157static int seq_show(struct seq_file *s, void *v) 155static int seq_show(struct seq_file *s, void *v)
@@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
161 struct nf_logger *t; 159 struct nf_logger *t;
162 int ret; 160 int ret;
163 161
164 logger = rcu_dereference(nf_loggers[*pos]); 162 logger = nf_loggers[*pos];
165 163
166 if (!logger) 164 if (!logger)
167 ret = seq_printf(s, "%2lld NONE (", *pos); 165 ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
171 if (ret < 0) 169 if (ret < 0)
172 return ret; 170 return ret;
173 171
174 mutex_lock(&nf_log_mutex);
175 list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) { 172 list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
176 ret = seq_printf(s, "%s", t->name); 173 ret = seq_printf(s, "%s", t->name);
177 if (ret < 0) { 174 if (ret < 0)
178 mutex_unlock(&nf_log_mutex);
179 return ret; 175 return ret;
180 }
181 if (&t->list[*pos] != nf_loggers_l[*pos].prev) { 176 if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
182 ret = seq_printf(s, ","); 177 ret = seq_printf(s, ",");
183 if (ret < 0) { 178 if (ret < 0)
184 mutex_unlock(&nf_log_mutex);
185 return ret; 179 return ret;
186 }
187 } 180 }
188 } 181 }
189 mutex_unlock(&nf_log_mutex);
190 182
191 return seq_printf(s, ")\n"); 183 return seq_printf(s, ")\n");
192} 184}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3aa66b2f9e87..9de0470d557e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -677,7 +677,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
677 struct hlist_head *head = &instance_table[i]; 677 struct hlist_head *head = &instance_table[i];
678 678
679 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { 679 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
680 if ((n->net == &init_net) && 680 if ((net_eq(n->net, &init_net)) &&
681 (n->pid == inst->peer_pid)) 681 (n->pid == inst->peer_pid))
682 __instance_destroy(inst); 682 __instance_destroy(inst);
683 } 683 }
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 680980954395..38f03f75a636 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -103,7 +103,7 @@ static int count_them(struct xt_connlimit_data *data,
103 const struct nf_conntrack_tuple *tuple, 103 const struct nf_conntrack_tuple *tuple,
104 const union nf_inet_addr *addr, 104 const union nf_inet_addr *addr,
105 const union nf_inet_addr *mask, 105 const union nf_inet_addr *mask,
106 const struct xt_match *match) 106 u_int8_t family)
107{ 107{
108 const struct nf_conntrack_tuple_hash *found; 108 const struct nf_conntrack_tuple_hash *found;
109 struct xt_connlimit_conn *conn; 109 struct xt_connlimit_conn *conn;
@@ -113,8 +113,7 @@ static int count_them(struct xt_connlimit_data *data,
113 bool addit = true; 113 bool addit = true;
114 int matches = 0; 114 int matches = 0;
115 115
116 116 if (family == NFPROTO_IPV6)
117 if (match->family == NFPROTO_IPV6)
118 hash = &data->iphash[connlimit_iphash6(addr, mask)]; 117 hash = &data->iphash[connlimit_iphash6(addr, mask)];
119 else 118 else
120 hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)]; 119 hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
@@ -157,8 +156,7 @@ static int count_them(struct xt_connlimit_data *data,
157 continue; 156 continue;
158 } 157 }
159 158
160 if (same_source_net(addr, mask, &conn->tuple.src.u3, 159 if (same_source_net(addr, mask, &conn->tuple.src.u3, family))
161 match->family))
162 /* same source network -> be counted! */ 160 /* same source network -> be counted! */
163 ++matches; 161 ++matches;
164 nf_ct_put(found_ct); 162 nf_ct_put(found_ct);
@@ -207,7 +205,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
207 205
208 spin_lock_bh(&info->data->lock); 206 spin_lock_bh(&info->data->lock);
209 connections = count_them(info->data, tuple_ptr, &addr, 207 connections = count_them(info->data, tuple_ptr, &addr,
210 &info->mask, par->match); 208 &info->mask, par->family);
211 spin_unlock_bh(&info->data->lock); 209 spin_unlock_bh(&info->data->lock);
212 210
213 if (connections < 0) { 211 if (connections < 0) {
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2e8089ecd0af..2773be6a71dd 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
112 112
113 priv = kmalloc(sizeof(*priv), GFP_KERNEL); 113 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
114 if (priv == NULL) 114 if (priv == NULL)
115 return -ENOMEM; 115 return false;
116 116
117 /* For SMP, we only want to use one set of state. */ 117 /* For SMP, we only want to use one set of state. */
118 r->master = priv; 118 r->master = priv;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 63e190504656..4d1a41bbd5d7 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
118{ 118{
119 struct xt_osf_user_finger *f; 119 struct xt_osf_user_finger *f;
120 struct xt_osf_finger *sf; 120 struct xt_osf_finger *sf;
121 int err = ENOENT; 121 int err = -ENOENT;
122 122
123 if (!osf_attrs[OSF_ATTR_FINGER]) 123 if (!osf_attrs[OSF_ATTR_FINGER])
124 return -EINVAL; 124 return -EINVAL;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index fb357f010189..98ed22ee2ff4 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -472,13 +472,12 @@ int netlbl_unlhsh_add(struct net *net,
472 472
473 rcu_read_lock(); 473 rcu_read_lock();
474 if (dev_name != NULL) { 474 if (dev_name != NULL) {
475 dev = dev_get_by_name(net, dev_name); 475 dev = dev_get_by_name_rcu(net, dev_name);
476 if (dev == NULL) { 476 if (dev == NULL) {
477 ret_val = -ENODEV; 477 ret_val = -ENODEV;
478 goto unlhsh_add_return; 478 goto unlhsh_add_return;
479 } 479 }
480 ifindex = dev->ifindex; 480 ifindex = dev->ifindex;
481 dev_put(dev);
482 iface = netlbl_unlhsh_search_iface(ifindex); 481 iface = netlbl_unlhsh_search_iface(ifindex);
483 } else { 482 } else {
484 ifindex = 0; 483 ifindex = 0;
@@ -737,13 +736,12 @@ int netlbl_unlhsh_remove(struct net *net,
737 736
738 rcu_read_lock(); 737 rcu_read_lock();
739 if (dev_name != NULL) { 738 if (dev_name != NULL) {
740 dev = dev_get_by_name(net, dev_name); 739 dev = dev_get_by_name_rcu(net, dev_name);
741 if (dev == NULL) { 740 if (dev == NULL) {
742 ret_val = -ENODEV; 741 ret_val = -ENODEV;
743 goto unlhsh_remove_return; 742 goto unlhsh_remove_return;
744 } 743 }
745 iface = netlbl_unlhsh_search_iface(dev->ifindex); 744 iface = netlbl_unlhsh_search_iface(dev->ifindex);
746 dev_put(dev);
747 } else 745 } else
748 iface = rcu_dereference(netlbl_unlhsh_def); 746 iface = rcu_dereference(netlbl_unlhsh_def);
749 if (iface == NULL) { 747 if (iface == NULL) {
@@ -1552,7 +1550,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
1552 struct netlbl_unlhsh_iface *iface; 1550 struct netlbl_unlhsh_iface *iface;
1553 1551
1554 rcu_read_lock(); 1552 rcu_read_lock();
1555 iface = netlbl_unlhsh_search_iface_def(skb->iif); 1553 iface = netlbl_unlhsh_search_iface_def(skb->skb_iif);
1556 if (iface == NULL) 1554 if (iface == NULL)
1557 goto unlabel_getattr_nolabel; 1555 goto unlabel_getattr_nolabel;
1558 switch (family) { 1556 switch (family) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0cd2d8829313..a4957bf2ca60 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -428,7 +428,8 @@ static int __netlink_create(struct net *net, struct socket *sock,
428 return 0; 428 return 0;
429} 429}
430 430
431static int netlink_create(struct net *net, struct socket *sock, int protocol) 431static int netlink_create(struct net *net, struct socket *sock, int protocol,
432 int kern)
432{ 433{
433 struct module *module = NULL; 434 struct module *module = NULL;
434 struct mutex *cb_mutex; 435 struct mutex *cb_mutex;
@@ -497,7 +498,7 @@ static int netlink_release(struct socket *sock)
497 498
498 skb_queue_purge(&sk->sk_write_queue); 499 skb_queue_purge(&sk->sk_write_queue);
499 500
500 if (nlk->pid && !nlk->subscriptions) { 501 if (nlk->pid) {
501 struct netlink_notify n = { 502 struct netlink_notify n = {
502 .net = sock_net(sk), 503 .net = sock_net(sk),
503 .protocol = sk->sk_protocol, 504 .protocol = sk->sk_protocol,
@@ -707,7 +708,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
707{ 708{
708 struct sock *sk = sock->sk; 709 struct sock *sk = sock->sk;
709 struct netlink_sock *nlk = nlk_sk(sk); 710 struct netlink_sock *nlk = nlk_sk(sk);
710 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 711 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
711 712
712 nladdr->nl_family = AF_NETLINK; 713 nladdr->nl_family = AF_NETLINK;
713 nladdr->nl_pad = 0; 714 nladdr->nl_pad = 0;
@@ -1091,7 +1092,7 @@ static inline int do_one_set_err(struct sock *sk,
1091 if (sk == p->exclude_sk) 1092 if (sk == p->exclude_sk)
1092 goto out; 1093 goto out;
1093 1094
1094 if (sock_net(sk) != sock_net(p->exclude_sk)) 1095 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1095 goto out; 1096 goto out;
1096 1097
1097 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 1098 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 281fa597cae5..71604c6613b5 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -425,12 +425,13 @@ static struct proto nr_proto = {
425 .obj_size = sizeof(struct nr_sock), 425 .obj_size = sizeof(struct nr_sock),
426}; 426};
427 427
428static int nr_create(struct net *net, struct socket *sock, int protocol) 428static int nr_create(struct net *net, struct socket *sock, int protocol,
429 int kern)
429{ 430{
430 struct sock *sk; 431 struct sock *sk;
431 struct nr_sock *nr; 432 struct nr_sock *nr;
432 433
433 if (net != &init_net) 434 if (!net_eq(net, &init_net))
434 return -EAFNOSUPPORT; 435 return -EAFNOSUPPORT;
435 436
436 if (sock->type != SOCK_SEQPACKET || protocol != 0) 437 if (sock->type != SOCK_SEQPACKET || protocol != 0)
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 4eb1ac9a7679..aacba76070fc 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -597,15 +597,15 @@ struct net_device *nr_dev_first(void)
597{ 597{
598 struct net_device *dev, *first = NULL; 598 struct net_device *dev, *first = NULL;
599 599
600 read_lock(&dev_base_lock); 600 rcu_read_lock();
601 for_each_netdev(&init_net, dev) { 601 for_each_netdev_rcu(&init_net, dev) {
602 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) 602 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
603 if (first == NULL || strncmp(dev->name, first->name, 3) < 0) 603 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
604 first = dev; 604 first = dev;
605 } 605 }
606 if (first) 606 if (first)
607 dev_hold(first); 607 dev_hold(first);
608 read_unlock(&dev_base_lock); 608 rcu_read_unlock();
609 609
610 return first; 610 return first;
611} 611}
@@ -617,16 +617,17 @@ struct net_device *nr_dev_get(ax25_address *addr)
617{ 617{
618 struct net_device *dev; 618 struct net_device *dev;
619 619
620 read_lock(&dev_base_lock); 620 rcu_read_lock();
621 for_each_netdev(&init_net, dev) { 621 for_each_netdev_rcu(&init_net, dev) {
622 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { 622 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
623 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
623 dev_hold(dev); 624 dev_hold(dev);
624 goto out; 625 goto out;
625 } 626 }
626 } 627 }
627 dev = NULL; 628 dev = NULL;
628out: 629out:
629 read_unlock(&dev_base_lock); 630 rcu_read_unlock();
630 return dev; 631 return dev;
631} 632}
632 633
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e68f20ec61..020562164b56 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
365 if (skb->pkt_type == PACKET_LOOPBACK) 365 if (skb->pkt_type == PACKET_LOOPBACK)
366 goto out; 366 goto out;
367 367
368 if (dev_net(dev) != sock_net(sk)) 368 if (!net_eq(dev_net(dev), sock_net(sk)))
369 goto out; 369 goto out;
370 370
371 skb = skb_share_check(skb, GFP_ATOMIC); 371 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -437,7 +437,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
437 */ 437 */
438 438
439 saddr->spkt_device[13] = 0; 439 saddr->spkt_device[13] = 0;
440 dev = dev_get_by_name(sock_net(sk), saddr->spkt_device); 440 rcu_read_lock();
441 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
441 err = -ENODEV; 442 err = -ENODEV;
442 if (dev == NULL) 443 if (dev == NULL)
443 goto out_unlock; 444 goto out_unlock;
@@ -500,14 +501,13 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
500 */ 501 */
501 502
502 dev_queue_xmit(skb); 503 dev_queue_xmit(skb);
503 dev_put(dev); 504 rcu_read_unlock();
504 return len; 505 return len;
505 506
506out_free: 507out_free:
507 kfree_skb(skb); 508 kfree_skb(skb);
508out_unlock: 509out_unlock:
509 if (dev) 510 rcu_read_unlock();
510 dev_put(dev);
511 return err; 511 return err;
512} 512}
513 513
@@ -553,7 +553,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
553 sk = pt->af_packet_priv; 553 sk = pt->af_packet_priv;
554 po = pkt_sk(sk); 554 po = pkt_sk(sk);
555 555
556 if (dev_net(dev) != sock_net(sk)) 556 if (!net_eq(dev_net(dev), sock_net(sk)))
557 goto drop; 557 goto drop;
558 558
559 skb->dev = dev; 559 skb->dev = dev;
@@ -674,7 +674,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
674 sk = pt->af_packet_priv; 674 sk = pt->af_packet_priv;
675 po = pkt_sk(sk); 675 po = pkt_sk(sk);
676 676
677 if (dev_net(dev) != sock_net(sk)) 677 if (!net_eq(dev_net(dev), sock_net(sk)))
678 goto drop; 678 goto drop;
679 679
680 if (dev->header_ops) { 680 if (dev->header_ops) {
@@ -984,10 +984,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
984 goto out_put; 984 goto out_put;
985 985
986 size_max = po->tx_ring.frame_size 986 size_max = po->tx_ring.frame_size
987 - sizeof(struct skb_shared_info) 987 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
988 - po->tp_hdrlen
989 - LL_ALLOCATED_SPACE(dev)
990 - sizeof(struct sockaddr_ll);
991 988
992 if (size_max > dev->mtu + reserve) 989 if (size_max > dev->mtu + reserve)
993 size_max = dev->mtu + reserve; 990 size_max = dev->mtu + reserve;
@@ -1037,9 +1034,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1037 goto out_xmit; 1034 goto out_xmit;
1038 packet_increment_head(&po->tx_ring); 1035 packet_increment_head(&po->tx_ring);
1039 len_sum += tp_len; 1036 len_sum += tp_len;
1040 } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) 1037 } while (likely((ph != NULL) ||
1041 && (atomic_read(&po->tx_ring.pending)))) 1038 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
1042 ); 1039 (atomic_read(&po->tx_ring.pending))))
1040 );
1043 1041
1044 err = len_sum; 1042 err = len_sum;
1045 goto out_put; 1043 goto out_put;
@@ -1347,7 +1345,8 @@ static struct proto packet_proto = {
1347 * Create a packet of type SOCK_PACKET. 1345 * Create a packet of type SOCK_PACKET.
1348 */ 1346 */
1349 1347
1350static int packet_create(struct net *net, struct socket *sock, int protocol) 1348static int packet_create(struct net *net, struct socket *sock, int protocol,
1349 int kern)
1351{ 1350{
1352 struct sock *sk; 1351 struct sock *sk;
1353 struct packet_sock *po; 1352 struct packet_sock *po;
@@ -1521,12 +1520,13 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1521 return -EOPNOTSUPP; 1520 return -EOPNOTSUPP;
1522 1521
1523 uaddr->sa_family = AF_PACKET; 1522 uaddr->sa_family = AF_PACKET;
1524 dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex); 1523 rcu_read_lock();
1525 if (dev) { 1524 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1525 if (dev)
1526 strlcpy(uaddr->sa_data, dev->name, 15); 1526 strlcpy(uaddr->sa_data, dev->name, 15);
1527 dev_put(dev); 1527 else
1528 } else
1529 memset(uaddr->sa_data, 0, 14); 1528 memset(uaddr->sa_data, 0, 14);
1529 rcu_read_unlock();
1530 *uaddr_len = sizeof(*uaddr); 1530 *uaddr_len = sizeof(*uaddr);
1531 1531
1532 return 0; 1532 return 0;
@@ -1538,7 +1538,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1538 struct net_device *dev; 1538 struct net_device *dev;
1539 struct sock *sk = sock->sk; 1539 struct sock *sk = sock->sk;
1540 struct packet_sock *po = pkt_sk(sk); 1540 struct packet_sock *po = pkt_sk(sk);
1541 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 1541 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1542 1542
1543 if (peer) 1543 if (peer)
1544 return -EOPNOTSUPP; 1544 return -EOPNOTSUPP;
@@ -1546,16 +1546,17 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1546 sll->sll_family = AF_PACKET; 1546 sll->sll_family = AF_PACKET;
1547 sll->sll_ifindex = po->ifindex; 1547 sll->sll_ifindex = po->ifindex;
1548 sll->sll_protocol = po->num; 1548 sll->sll_protocol = po->num;
1549 dev = dev_get_by_index(sock_net(sk), po->ifindex); 1549 rcu_read_lock();
1550 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1550 if (dev) { 1551 if (dev) {
1551 sll->sll_hatype = dev->type; 1552 sll->sll_hatype = dev->type;
1552 sll->sll_halen = dev->addr_len; 1553 sll->sll_halen = dev->addr_len;
1553 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 1554 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1554 dev_put(dev);
1555 } else { 1555 } else {
1556 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 1556 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1557 sll->sll_halen = 0; 1557 sll->sll_halen = 0;
1558 } 1558 }
1559 rcu_read_unlock();
1559 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 1560 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1560 1561
1561 return 0; 1562 return 0;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 66737aa995ea..526d0273991a 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -35,7 +35,6 @@
35 35
36/* Transport protocol registration */ 36/* Transport protocol registration */
37static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 37static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
38static DEFINE_SPINLOCK(proto_tab_lock);
39 38
40static struct phonet_protocol *phonet_proto_get(int protocol) 39static struct phonet_protocol *phonet_proto_get(int protocol)
41{ 40{
@@ -44,11 +43,11 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
44 if (protocol >= PHONET_NPROTO) 43 if (protocol >= PHONET_NPROTO)
45 return NULL; 44 return NULL;
46 45
47 spin_lock(&proto_tab_lock); 46 rcu_read_lock();
48 pp = proto_tab[protocol]; 47 pp = rcu_dereference(proto_tab[protocol]);
49 if (pp && !try_module_get(pp->prot->owner)) 48 if (pp && !try_module_get(pp->prot->owner))
50 pp = NULL; 49 pp = NULL;
51 spin_unlock(&proto_tab_lock); 50 rcu_read_unlock();
52 51
53 return pp; 52 return pp;
54} 53}
@@ -60,7 +59,8 @@ static inline void phonet_proto_put(struct phonet_protocol *pp)
60 59
61/* protocol family functions */ 60/* protocol family functions */
62 61
63static int pn_socket_create(struct net *net, struct socket *sock, int protocol) 62static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
63 int kern)
64{ 64{
65 struct sock *sk; 65 struct sock *sk;
66 struct pn_sock *pn; 66 struct pn_sock *pn;
@@ -438,6 +438,8 @@ static struct packet_type phonet_packet_type __read_mostly = {
438 .func = phonet_rcv, 438 .func = phonet_rcv,
439}; 439};
440 440
441static DEFINE_MUTEX(proto_tab_lock);
442
441int __init_or_module phonet_proto_register(int protocol, 443int __init_or_module phonet_proto_register(int protocol,
442 struct phonet_protocol *pp) 444 struct phonet_protocol *pp)
443{ 445{
@@ -450,12 +452,12 @@ int __init_or_module phonet_proto_register(int protocol,
450 if (err) 452 if (err)
451 return err; 453 return err;
452 454
453 spin_lock(&proto_tab_lock); 455 mutex_lock(&proto_tab_lock);
454 if (proto_tab[protocol]) 456 if (proto_tab[protocol])
455 err = -EBUSY; 457 err = -EBUSY;
456 else 458 else
457 proto_tab[protocol] = pp; 459 rcu_assign_pointer(proto_tab[protocol], pp);
458 spin_unlock(&proto_tab_lock); 460 mutex_unlock(&proto_tab_lock);
459 461
460 return err; 462 return err;
461} 463}
@@ -463,10 +465,11 @@ EXPORT_SYMBOL(phonet_proto_register);
463 465
464void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) 466void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
465{ 467{
466 spin_lock(&proto_tab_lock); 468 mutex_lock(&proto_tab_lock);
467 BUG_ON(proto_tab[protocol] != pp); 469 BUG_ON(proto_tab[protocol] != pp);
468 proto_tab[protocol] = NULL; 470 rcu_assign_pointer(proto_tab[protocol], NULL);
469 spin_unlock(&proto_tab_lock); 471 mutex_unlock(&proto_tab_lock);
472 synchronize_rcu();
470 proto_unregister(pp->prot); 473 proto_unregister(pp->prot);
471} 474}
472EXPORT_SYMBOL(phonet_proto_unregister); 475EXPORT_SYMBOL(phonet_proto_unregister);
@@ -480,6 +483,7 @@ static int __init phonet_init(void)
480 if (err) 483 if (err)
481 return err; 484 return err;
482 485
486 pn_sock_init();
483 err = sock_register(&phonet_proto_family); 487 err = sock_register(&phonet_proto_family);
484 if (err) { 488 if (err) {
485 printk(KERN_ALERT 489 printk(KERN_ALERT
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index cbaa1d67d77b..b6356f3832f6 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -714,8 +714,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
714 return -EINVAL; 714 return -EINVAL;
715 715
716 lock_sock(sk); 716 lock_sock(sk);
717 if (sock_flag(sk, SOCK_URGINLINE) 717 if (sock_flag(sk, SOCK_URGINLINE) &&
718 && !skb_queue_empty(&pn->ctrlreq_queue)) 718 !skb_queue_empty(&pn->ctrlreq_queue))
719 answ = skb_peek(&pn->ctrlreq_queue)->len; 719 answ = skb_peek(&pn->ctrlreq_queue)->len;
720 else if (!skb_queue_empty(&sk->sk_receive_queue)) 720 else if (!skb_queue_empty(&sk->sk_receive_queue))
721 answ = skb_peek(&sk->sk_receive_queue)->len; 721 answ = skb_peek(&sk->sk_receive_queue)->len;
@@ -843,7 +843,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
843 struct msghdr *msg, size_t len) 843 struct msghdr *msg, size_t len)
844{ 844{
845 struct pep_sock *pn = pep_sk(sk); 845 struct pep_sock *pn = pep_sk(sk);
846 struct sk_buff *skb = NULL; 846 struct sk_buff *skb;
847 long timeo; 847 long timeo;
848 int flags = msg->msg_flags; 848 int flags = msg->msg_flags;
849 int err, done; 849 int err, done;
@@ -851,6 +851,16 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) 851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
852 return -EOPNOTSUPP; 852 return -EOPNOTSUPP;
853 853
854 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
855 flags & MSG_DONTWAIT, &err);
856 if (!skb)
857 return -ENOBUFS;
858
859 skb_reserve(skb, MAX_PHONET_HEADER + 3);
860 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
861 if (err < 0)
862 goto outfree;
863
854 lock_sock(sk); 864 lock_sock(sk);
855 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 865 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
856 if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { 866 if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
@@ -894,28 +904,13 @@ disabled:
894 goto disabled; 904 goto disabled;
895 } 905 }
896 906
897 if (!skb) {
898 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
899 flags & MSG_DONTWAIT, &err);
900 if (skb == NULL)
901 goto out;
902 skb_reserve(skb, MAX_PHONET_HEADER + 3);
903
904 if (sk->sk_state != TCP_ESTABLISHED ||
905 !atomic_read(&pn->tx_credits))
906 goto disabled; /* sock_alloc_send_skb might sleep */
907 }
908
909 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
910 if (err < 0)
911 goto out;
912
913 err = pipe_skb_send(sk, skb); 907 err = pipe_skb_send(sk, skb);
914 if (err >= 0) 908 if (err >= 0)
915 err = len; /* success! */ 909 err = len; /* success! */
916 skb = NULL; 910 skb = NULL;
917out: 911out:
918 release_sock(sk); 912 release_sock(sk);
913outfree:
919 kfree_skb(skb); 914 kfree_skb(skb);
920 return err; 915 return err;
921} 916}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 6d64fda1afc9..bc4a33bf2d3d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -34,7 +34,7 @@
34#include <net/phonet/pn_dev.h> 34#include <net/phonet/pn_dev.h>
35 35
36struct phonet_routes { 36struct phonet_routes {
37 spinlock_t lock; 37 struct mutex lock;
38 struct net_device *table[64]; 38 struct net_device *table[64];
39}; 39};
40 40
@@ -43,7 +43,7 @@ struct phonet_net {
43 struct phonet_routes routes; 43 struct phonet_routes routes;
44}; 44};
45 45
46int phonet_net_id; 46int phonet_net_id __read_mostly;
47 47
48struct phonet_device_list *phonet_device_list(struct net *net) 48struct phonet_device_list *phonet_device_list(struct net *net)
49{ 49{
@@ -61,7 +61,8 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
61 pnd->netdev = dev; 61 pnd->netdev = dev;
62 bitmap_zero(pnd->addrs, 64); 62 bitmap_zero(pnd->addrs, 64);
63 63
64 list_add(&pnd->list, &pndevs->list); 64 BUG_ON(!mutex_is_locked(&pndevs->lock));
65 list_add_rcu(&pnd->list, &pndevs->list);
65 return pnd; 66 return pnd;
66} 67}
67 68
@@ -70,6 +71,7 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
70 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 71 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
71 struct phonet_device *pnd; 72 struct phonet_device *pnd;
72 73
74 BUG_ON(!mutex_is_locked(&pndevs->lock));
73 list_for_each_entry(pnd, &pndevs->list, list) { 75 list_for_each_entry(pnd, &pndevs->list, list) {
74 if (pnd->netdev == dev) 76 if (pnd->netdev == dev)
75 return pnd; 77 return pnd;
@@ -77,6 +79,18 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
77 return NULL; 79 return NULL;
78} 80}
79 81
82static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
83{
84 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
85 struct phonet_device *pnd;
86
87 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
88 if (pnd->netdev == dev)
89 return pnd;
90 }
91 return NULL;
92}
93
80static void phonet_device_destroy(struct net_device *dev) 94static void phonet_device_destroy(struct net_device *dev)
81{ 95{
82 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 96 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -84,11 +98,11 @@ static void phonet_device_destroy(struct net_device *dev)
84 98
85 ASSERT_RTNL(); 99 ASSERT_RTNL();
86 100
87 spin_lock_bh(&pndevs->lock); 101 mutex_lock(&pndevs->lock);
88 pnd = __phonet_get(dev); 102 pnd = __phonet_get(dev);
89 if (pnd) 103 if (pnd)
90 list_del(&pnd->list); 104 list_del_rcu(&pnd->list);
91 spin_unlock_bh(&pndevs->lock); 105 mutex_unlock(&pndevs->lock);
92 106
93 if (pnd) { 107 if (pnd) {
94 u8 addr; 108 u8 addr;
@@ -106,8 +120,8 @@ struct net_device *phonet_device_get(struct net *net)
106 struct phonet_device *pnd; 120 struct phonet_device *pnd;
107 struct net_device *dev = NULL; 121 struct net_device *dev = NULL;
108 122
109 spin_lock_bh(&pndevs->lock); 123 rcu_read_lock();
110 list_for_each_entry(pnd, &pndevs->list, list) { 124 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
111 dev = pnd->netdev; 125 dev = pnd->netdev;
112 BUG_ON(!dev); 126 BUG_ON(!dev);
113 127
@@ -118,7 +132,7 @@ struct net_device *phonet_device_get(struct net *net)
118 } 132 }
119 if (dev) 133 if (dev)
120 dev_hold(dev); 134 dev_hold(dev);
121 spin_unlock_bh(&pndevs->lock); 135 rcu_read_unlock();
122 return dev; 136 return dev;
123} 137}
124 138
@@ -128,7 +142,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
128 struct phonet_device *pnd; 142 struct phonet_device *pnd;
129 int err = 0; 143 int err = 0;
130 144
131 spin_lock_bh(&pndevs->lock); 145 mutex_lock(&pndevs->lock);
132 /* Find or create Phonet-specific device data */ 146 /* Find or create Phonet-specific device data */
133 pnd = __phonet_get(dev); 147 pnd = __phonet_get(dev);
134 if (pnd == NULL) 148 if (pnd == NULL)
@@ -137,7 +151,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
137 err = -ENOMEM; 151 err = -ENOMEM;
138 else if (test_and_set_bit(addr >> 2, pnd->addrs)) 152 else if (test_and_set_bit(addr >> 2, pnd->addrs))
139 err = -EEXIST; 153 err = -EEXIST;
140 spin_unlock_bh(&pndevs->lock); 154 mutex_unlock(&pndevs->lock);
141 return err; 155 return err;
142} 156}
143 157
@@ -147,27 +161,32 @@ int phonet_address_del(struct net_device *dev, u8 addr)
147 struct phonet_device *pnd; 161 struct phonet_device *pnd;
148 int err = 0; 162 int err = 0;
149 163
150 spin_lock_bh(&pndevs->lock); 164 mutex_lock(&pndevs->lock);
151 pnd = __phonet_get(dev); 165 pnd = __phonet_get(dev);
152 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) 166 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
153 err = -EADDRNOTAVAIL; 167 err = -EADDRNOTAVAIL;
154 else if (bitmap_empty(pnd->addrs, 64)) { 168 pnd = NULL;
155 list_del(&pnd->list); 169 } else if (bitmap_empty(pnd->addrs, 64))
170 list_del_rcu(&pnd->list);
171 else
172 pnd = NULL;
173 mutex_unlock(&pndevs->lock);
174
175 if (pnd) {
176 synchronize_rcu();
156 kfree(pnd); 177 kfree(pnd);
157 } 178 }
158 spin_unlock_bh(&pndevs->lock);
159 return err; 179 return err;
160} 180}
161 181
162/* Gets a source address toward a destination, through a interface. */ 182/* Gets a source address toward a destination, through a interface. */
163u8 phonet_address_get(struct net_device *dev, u8 daddr) 183u8 phonet_address_get(struct net_device *dev, u8 daddr)
164{ 184{
165 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
166 struct phonet_device *pnd; 185 struct phonet_device *pnd;
167 u8 saddr; 186 u8 saddr;
168 187
169 spin_lock_bh(&pndevs->lock); 188 rcu_read_lock();
170 pnd = __phonet_get(dev); 189 pnd = __phonet_get_rcu(dev);
171 if (pnd) { 190 if (pnd) {
172 BUG_ON(bitmap_empty(pnd->addrs, 64)); 191 BUG_ON(bitmap_empty(pnd->addrs, 64));
173 192
@@ -178,7 +197,7 @@ u8 phonet_address_get(struct net_device *dev, u8 daddr)
178 saddr = find_first_bit(pnd->addrs, 64) << 2; 197 saddr = find_first_bit(pnd->addrs, 64) << 2;
179 } else 198 } else
180 saddr = PN_NO_ADDR; 199 saddr = PN_NO_ADDR;
181 spin_unlock_bh(&pndevs->lock); 200 rcu_read_unlock();
182 201
183 if (saddr == PN_NO_ADDR) { 202 if (saddr == PN_NO_ADDR) {
184 /* Fallback to another device */ 203 /* Fallback to another device */
@@ -200,8 +219,8 @@ int phonet_address_lookup(struct net *net, u8 addr)
200 struct phonet_device *pnd; 219 struct phonet_device *pnd;
201 int err = -EADDRNOTAVAIL; 220 int err = -EADDRNOTAVAIL;
202 221
203 spin_lock_bh(&pndevs->lock); 222 rcu_read_lock();
204 list_for_each_entry(pnd, &pndevs->list, list) { 223 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
205 /* Don't allow unregistering devices! */ 224 /* Don't allow unregistering devices! */
206 if ((pnd->netdev->reg_state != NETREG_REGISTERED) || 225 if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
207 ((pnd->netdev->flags & IFF_UP)) != IFF_UP) 226 ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
@@ -213,7 +232,7 @@ int phonet_address_lookup(struct net *net, u8 addr)
213 } 232 }
214 } 233 }
215found: 234found:
216 spin_unlock_bh(&pndevs->lock); 235 rcu_read_unlock();
217 return err; 236 return err;
218} 237}
219 238
@@ -248,17 +267,22 @@ static void phonet_route_autodel(struct net_device *dev)
248 267
249 /* Remove left-over Phonet routes */ 268 /* Remove left-over Phonet routes */
250 bitmap_zero(deleted, 64); 269 bitmap_zero(deleted, 64);
251 spin_lock_bh(&pnn->routes.lock); 270 mutex_lock(&pnn->routes.lock);
252 for (i = 0; i < 64; i++) 271 for (i = 0; i < 64; i++)
253 if (dev == pnn->routes.table[i]) { 272 if (dev == pnn->routes.table[i]) {
273 rcu_assign_pointer(pnn->routes.table[i], NULL);
254 set_bit(i, deleted); 274 set_bit(i, deleted);
255 pnn->routes.table[i] = NULL;
256 dev_put(dev);
257 } 275 }
258 spin_unlock_bh(&pnn->routes.lock); 276 mutex_unlock(&pnn->routes.lock);
277
278 if (bitmap_empty(deleted, 64))
279 return; /* short-circuit RCU */
280 synchronize_rcu();
259 for (i = find_first_bit(deleted, 64); i < 64; 281 for (i = find_first_bit(deleted, 64); i < 64;
260 i = find_next_bit(deleted, 64, i + 1)) 282 i = find_next_bit(deleted, 64, i + 1)) {
261 rtm_phonet_notify(RTM_DELROUTE, dev, i); 283 rtm_phonet_notify(RTM_DELROUTE, dev, i);
284 dev_put(dev);
285 }
262} 286}
263 287
264/* notify Phonet of device events */ 288/* notify Phonet of device events */
@@ -289,19 +313,14 @@ static struct notifier_block phonet_device_notifier = {
289/* Per-namespace Phonet devices handling */ 313/* Per-namespace Phonet devices handling */
290static int phonet_init_net(struct net *net) 314static int phonet_init_net(struct net *net)
291{ 315{
292 struct phonet_net *pnn = kzalloc(sizeof(*pnn), GFP_KERNEL); 316 struct phonet_net *pnn = net_generic(net, phonet_net_id);
293 if (!pnn)
294 return -ENOMEM;
295 317
296 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) { 318 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
297 kfree(pnn);
298 return -ENOMEM; 319 return -ENOMEM;
299 }
300 320
301 INIT_LIST_HEAD(&pnn->pndevs.list); 321 INIT_LIST_HEAD(&pnn->pndevs.list);
302 spin_lock_init(&pnn->pndevs.lock); 322 mutex_init(&pnn->pndevs.lock);
303 spin_lock_init(&pnn->routes.lock); 323 mutex_init(&pnn->routes.lock);
304 net_assign_generic(net, phonet_net_id, pnn);
305 return 0; 324 return 0;
306} 325}
307 326
@@ -325,18 +344,19 @@ static void phonet_exit_net(struct net *net)
325 rtnl_unlock(); 344 rtnl_unlock();
326 345
327 proc_net_remove(net, "phonet"); 346 proc_net_remove(net, "phonet");
328 kfree(pnn);
329} 347}
330 348
331static struct pernet_operations phonet_net_ops = { 349static struct pernet_operations phonet_net_ops = {
332 .init = phonet_init_net, 350 .init = phonet_init_net,
333 .exit = phonet_exit_net, 351 .exit = phonet_exit_net,
352 .id = &phonet_net_id,
353 .size = sizeof(struct phonet_net),
334}; 354};
335 355
336/* Initialize Phonet devices list */ 356/* Initialize Phonet devices list */
337int __init phonet_device_init(void) 357int __init phonet_device_init(void)
338{ 358{
339 int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops); 359 int err = register_pernet_device(&phonet_net_ops);
340 if (err) 360 if (err)
341 return err; 361 return err;
342 362
@@ -351,7 +371,7 @@ void phonet_device_exit(void)
351{ 371{
352 rtnl_unregister_all(PF_PHONET); 372 rtnl_unregister_all(PF_PHONET);
353 unregister_netdevice_notifier(&phonet_device_notifier); 373 unregister_netdevice_notifier(&phonet_device_notifier);
354 unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); 374 unregister_pernet_device(&phonet_net_ops);
355} 375}
356 376
357int phonet_route_add(struct net_device *dev, u8 daddr) 377int phonet_route_add(struct net_device *dev, u8 daddr)
@@ -361,13 +381,13 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
361 int err = -EEXIST; 381 int err = -EEXIST;
362 382
363 daddr = daddr >> 2; 383 daddr = daddr >> 2;
364 spin_lock_bh(&routes->lock); 384 mutex_lock(&routes->lock);
365 if (routes->table[daddr] == NULL) { 385 if (routes->table[daddr] == NULL) {
366 routes->table[daddr] = dev; 386 rcu_assign_pointer(routes->table[daddr], dev);
367 dev_hold(dev); 387 dev_hold(dev);
368 err = 0; 388 err = 0;
369 } 389 }
370 spin_unlock_bh(&routes->lock); 390 mutex_unlock(&routes->lock);
371 return err; 391 return err;
372} 392}
373 393
@@ -375,17 +395,20 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
375{ 395{
376 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); 396 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
377 struct phonet_routes *routes = &pnn->routes; 397 struct phonet_routes *routes = &pnn->routes;
378 int err = -ENOENT;
379 398
380 daddr = daddr >> 2; 399 daddr = daddr >> 2;
381 spin_lock_bh(&routes->lock); 400 mutex_lock(&routes->lock);
382 if (dev == routes->table[daddr]) { 401 if (dev == routes->table[daddr])
383 routes->table[daddr] = NULL; 402 rcu_assign_pointer(routes->table[daddr], NULL);
384 dev_put(dev); 403 else
385 err = 0; 404 dev = NULL;
386 } 405 mutex_unlock(&routes->lock);
387 spin_unlock_bh(&routes->lock); 406
388 return err; 407 if (!dev)
408 return -ENOENT;
409 synchronize_rcu();
410 dev_put(dev);
411 return 0;
389} 412}
390 413
391struct net_device *phonet_route_get(struct net *net, u8 daddr) 414struct net_device *phonet_route_get(struct net *net, u8 daddr)
@@ -397,9 +420,9 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
397 ASSERT_RTNL(); /* no need to hold the device */ 420 ASSERT_RTNL(); /* no need to hold the device */
398 421
399 daddr >>= 2; 422 daddr >>= 2;
400 spin_lock_bh(&routes->lock); 423 rcu_read_lock();
401 dev = routes->table[daddr]; 424 dev = rcu_dereference(routes->table[daddr]);
402 spin_unlock_bh(&routes->lock); 425 rcu_read_unlock();
403 return dev; 426 return dev;
404} 427}
405 428
@@ -409,11 +432,12 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr)
409 struct phonet_routes *routes = &pnn->routes; 432 struct phonet_routes *routes = &pnn->routes;
410 struct net_device *dev; 433 struct net_device *dev;
411 434
412 spin_lock_bh(&routes->lock); 435 daddr >>= 2;
413 dev = routes->table[daddr >> 2]; 436 rcu_read_lock();
437 dev = rcu_dereference(routes->table[daddr]);
414 if (dev) 438 if (dev)
415 dev_hold(dev); 439 dev_hold(dev);
416 spin_unlock_bh(&routes->lock); 440 rcu_read_unlock();
417 441
418 if (!dev) 442 if (!dev)
419 dev = phonet_device_get(net); /* Default route */ 443 dev = phonet_device_get(net); /* Default route */
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d8f5d3fb9ee2..2e6c7eb8e76a 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -53,8 +53,7 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr)
53 RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); 53 RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
54 return; 54 return;
55errout: 55errout:
56 if (err < 0) 56 rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
57 rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
58} 57}
59 58
60static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { 59static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
@@ -132,8 +131,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
132 int addr_idx = 0, addr_start_idx = cb->args[1]; 131 int addr_idx = 0, addr_start_idx = cb->args[1];
133 132
134 pndevs = phonet_device_list(sock_net(skb->sk)); 133 pndevs = phonet_device_list(sock_net(skb->sk));
135 spin_lock_bh(&pndevs->lock); 134 rcu_read_lock();
136 list_for_each_entry(pnd, &pndevs->list, list) { 135 list_for_each_entry_rcu(pnd, &pndevs->list, list) {
137 u8 addr; 136 u8 addr;
138 137
139 if (dev_idx > dev_start_idx) 138 if (dev_idx > dev_start_idx)
@@ -155,7 +154,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
155 } 154 }
156 155
157out: 156out:
158 spin_unlock_bh(&pndevs->lock); 157 rcu_read_unlock();
159 cb->args[0] = dev_idx; 158 cb->args[0] = dev_idx;
160 cb->args[1] = addr_idx; 159 cb->args[1] = addr_idx;
161 160
@@ -212,8 +211,7 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
212 RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); 211 RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
213 return; 212 return;
214errout: 213errout:
215 if (err < 0) 214 rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
216 rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
217} 215}
218 216
219static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { 217static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 0412beb59a05..69c8b826a0ce 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -45,13 +45,28 @@ static int pn_socket_release(struct socket *sock)
45 return 0; 45 return 0;
46} 46}
47 47
48#define PN_HASHSIZE 16
49#define PN_HASHMASK (PN_HASHSIZE-1)
50
51
48static struct { 52static struct {
49 struct hlist_head hlist; 53 struct hlist_head hlist[PN_HASHSIZE];
50 spinlock_t lock; 54 spinlock_t lock;
51} pnsocks = { 55} pnsocks;
52 .hlist = HLIST_HEAD_INIT, 56
53 .lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock), 57void __init pn_sock_init(void)
54}; 58{
59 unsigned i;
60
61 for (i = 0; i < PN_HASHSIZE; i++)
62 INIT_HLIST_HEAD(pnsocks.hlist + i);
63 spin_lock_init(&pnsocks.lock);
64}
65
66static struct hlist_head *pn_hash_list(u16 obj)
67{
68 return pnsocks.hlist + (obj & PN_HASHMASK);
69}
55 70
56/* 71/*
57 * Find address based on socket address, match only certain fields. 72 * Find address based on socket address, match only certain fields.
@@ -64,10 +79,11 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
64 struct sock *rval = NULL; 79 struct sock *rval = NULL;
65 u16 obj = pn_sockaddr_get_object(spn); 80 u16 obj = pn_sockaddr_get_object(spn);
66 u8 res = spn->spn_resource; 81 u8 res = spn->spn_resource;
82 struct hlist_head *hlist = pn_hash_list(obj);
67 83
68 spin_lock_bh(&pnsocks.lock); 84 spin_lock_bh(&pnsocks.lock);
69 85
70 sk_for_each(sknode, node, &pnsocks.hlist) { 86 sk_for_each(sknode, node, hlist) {
71 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
72 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
73 89
@@ -82,8 +98,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
82 if (pn->resource != res) 98 if (pn->resource != res)
83 continue; 99 continue;
84 } 100 }
85 if (pn_addr(pn->sobject) 101 if (pn_addr(pn->sobject) &&
86 && pn_addr(pn->sobject) != pn_addr(obj)) 102 pn_addr(pn->sobject) != pn_addr(obj))
87 continue; 103 continue;
88 104
89 rval = sknode; 105 rval = sknode;
@@ -99,31 +115,39 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
99/* Deliver a broadcast packet (only in bottom-half) */ 115/* Deliver a broadcast packet (only in bottom-half) */
100void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) 116void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
101{ 117{
102 struct hlist_node *node; 118 struct hlist_head *hlist = pnsocks.hlist;
103 struct sock *sknode; 119 unsigned h;
104 120
105 spin_lock(&pnsocks.lock); 121 spin_lock(&pnsocks.lock);
106 sk_for_each(sknode, node, &pnsocks.hlist) { 122 for (h = 0; h < PN_HASHSIZE; h++) {
107 struct sk_buff *clone; 123 struct hlist_node *node;
124 struct sock *sknode;
108 125
109 if (!net_eq(sock_net(sknode), net)) 126 sk_for_each(sknode, node, hlist) {
110 continue; 127 struct sk_buff *clone;
111 if (!sock_flag(sknode, SOCK_BROADCAST))
112 continue;
113 128
114 clone = skb_clone(skb, GFP_ATOMIC); 129 if (!net_eq(sock_net(sknode), net))
115 if (clone) { 130 continue;
116 sock_hold(sknode); 131 if (!sock_flag(sknode, SOCK_BROADCAST))
117 sk_receive_skb(sknode, clone, 0); 132 continue;
133
134 clone = skb_clone(skb, GFP_ATOMIC);
135 if (clone) {
136 sock_hold(sknode);
137 sk_receive_skb(sknode, clone, 0);
138 }
118 } 139 }
140 hlist++;
119 } 141 }
120 spin_unlock(&pnsocks.lock); 142 spin_unlock(&pnsocks.lock);
121} 143}
122 144
123void pn_sock_hash(struct sock *sk) 145void pn_sock_hash(struct sock *sk)
124{ 146{
147 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
148
125 spin_lock_bh(&pnsocks.lock); 149 spin_lock_bh(&pnsocks.lock);
126 sk_add_node(sk, &pnsocks.hlist); 150 sk_add_node(sk, hlist);
127 spin_unlock_bh(&pnsocks.lock); 151 spin_unlock_bh(&pnsocks.lock);
128} 152}
129EXPORT_SYMBOL(pn_sock_hash); 153EXPORT_SYMBOL(pn_sock_hash);
@@ -439,15 +463,20 @@ EXPORT_SYMBOL(pn_sock_get_port);
439static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) 463static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
440{ 464{
441 struct net *net = seq_file_net(seq); 465 struct net *net = seq_file_net(seq);
466 struct hlist_head *hlist = pnsocks.hlist;
442 struct hlist_node *node; 467 struct hlist_node *node;
443 struct sock *sknode; 468 struct sock *sknode;
469 unsigned h;
444 470
445 sk_for_each(sknode, node, &pnsocks.hlist) { 471 for (h = 0; h < PN_HASHSIZE; h++) {
446 if (!net_eq(net, sock_net(sknode))) 472 sk_for_each(sknode, node, hlist) {
447 continue; 473 if (!net_eq(net, sock_net(sknode)))
448 if (!pos) 474 continue;
449 return sknode; 475 if (!pos)
450 pos--; 476 return sknode;
477 pos--;
478 }
479 hlist++;
451 } 480 }
452 return NULL; 481 return NULL;
453} 482}
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index a202e5b36079..853c52be781f 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -174,8 +174,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
174 mask |= (POLLIN | POLLRDNORM); 174 mask |= (POLLIN | POLLRDNORM);
175 spin_unlock(&rs->rs_lock); 175 spin_unlock(&rs->rs_lock);
176 } 176 }
177 if (!list_empty(&rs->rs_recv_queue) 177 if (!list_empty(&rs->rs_recv_queue) ||
178 || !list_empty(&rs->rs_notify_queue)) 178 !list_empty(&rs->rs_notify_queue))
179 mask |= (POLLIN | POLLRDNORM); 179 mask |= (POLLIN | POLLRDNORM);
180 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) 180 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
181 mask |= (POLLOUT | POLLWRNORM); 181 mask |= (POLLOUT | POLLWRNORM);
@@ -265,6 +265,9 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
265 case RDS_GET_MR: 265 case RDS_GET_MR:
266 ret = rds_get_mr(rs, optval, optlen); 266 ret = rds_get_mr(rs, optval, optlen);
267 break; 267 break;
268 case RDS_GET_MR_FOR_DEST:
269 ret = rds_get_mr_for_dest(rs, optval, optlen);
270 break;
268 case RDS_FREE_MR: 271 case RDS_FREE_MR:
269 ret = rds_free_mr(rs, optval, optlen); 272 ret = rds_free_mr(rs, optval, optlen);
270 break; 273 break;
@@ -305,8 +308,8 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
305 if (len < sizeof(int)) 308 if (len < sizeof(int))
306 ret = -EINVAL; 309 ret = -EINVAL;
307 else 310 else
308 if (put_user(rs->rs_recverr, (int __user *) optval) 311 if (put_user(rs->rs_recverr, (int __user *) optval) ||
309 || put_user(sizeof(int), optlen)) 312 put_user(sizeof(int), optlen))
310 ret = -EFAULT; 313 ret = -EFAULT;
311 else 314 else
312 ret = 0; 315 ret = 0;
@@ -407,7 +410,8 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
407 return 0; 410 return 0;
408} 411}
409 412
410static int rds_create(struct net *net, struct socket *sock, int protocol) 413static int rds_create(struct net *net, struct socket *sock, int protocol,
414 int kern)
411{ 415{
412 struct sock *sk; 416 struct sock *sk;
413 417
diff --git a/net/rds/cong.c b/net/rds/cong.c
index dd2711df640b..6d06cac2649c 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -218,6 +218,8 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
218 spin_lock_irqsave(&rds_cong_lock, flags); 218 spin_lock_irqsave(&rds_cong_lock, flags);
219 219
220 list_for_each_entry(conn, &map->m_conn_list, c_map_item) { 220 list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
221 if (conn->c_loopback)
222 continue;
221 if (!test_and_set_bit(0, &conn->c_map_queued)) { 223 if (!test_and_set_bit(0, &conn->c_map_queued)) {
222 rds_stats_inc(s_cong_update_queued); 224 rds_stats_inc(s_cong_update_queued);
223 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 225 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index cc8b568c0c84..278f607ab603 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -133,10 +133,8 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
133 133
134 spin_lock_irqsave(&rds_conn_lock, flags); 134 spin_lock_irqsave(&rds_conn_lock, flags);
135 conn = rds_conn_lookup(head, laddr, faddr, trans); 135 conn = rds_conn_lookup(head, laddr, faddr, trans);
136 if (conn 136 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
137 && conn->c_loopback 137 !is_outgoing) {
138 && conn->c_trans != &rds_loop_transport
139 && !is_outgoing) {
140 /* This is a looped back IB connection, and we're 138 /* This is a looped back IB connection, and we're
141 * called by the code handling the incoming connect. 139 * called by the code handling the incoming connect.
142 * We need a second connection object into which we 140 * We need a second connection object into which we
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 1378b854cac0..64df4e79b29f 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -98,6 +98,7 @@ struct rds_ib_connection {
98 struct rds_ib_send_work *i_sends; 98 struct rds_ib_send_work *i_sends;
99 99
100 /* rx */ 100 /* rx */
101 struct tasklet_struct i_recv_tasklet;
101 struct mutex i_recv_mutex; 102 struct mutex i_recv_mutex;
102 struct rds_ib_work_ring i_recv_ring; 103 struct rds_ib_work_ring i_recv_ring;
103 struct rds_ib_incoming *i_ibinc; 104 struct rds_ib_incoming *i_ibinc;
@@ -303,6 +304,7 @@ void rds_ib_inc_free(struct rds_incoming *inc);
303int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 304int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
304 size_t size); 305 size_t size);
305void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); 306void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
307void rds_ib_recv_tasklet_fn(unsigned long data);
306void rds_ib_recv_init_ring(struct rds_ib_connection *ic); 308void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
307void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); 309void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
308void rds_ib_recv_init_ack(struct rds_ib_connection *ic); 310void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c2d372f13dbb..647cb8ffc39b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -377,8 +377,8 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
377 } 377 }
378 378
379 /* Even if len is crap *now* I still want to check it. -ASG */ 379 /* Even if len is crap *now* I still want to check it. -ASG */
380 if (event->param.conn.private_data_len < sizeof (*dp) 380 if (event->param.conn.private_data_len < sizeof (*dp) ||
381 || dp->dp_protocol_major == 0) 381 dp->dp_protocol_major == 0)
382 return RDS_PROTOCOL_3_0; 382 return RDS_PROTOCOL_3_0;
383 383
384 common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; 384 common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
@@ -694,6 +694,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
694 return -ENOMEM; 694 return -ENOMEM;
695 695
696 INIT_LIST_HEAD(&ic->ib_node); 696 INIT_LIST_HEAD(&ic->ib_node);
697 tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
698 (unsigned long) ic);
697 mutex_init(&ic->i_recv_mutex); 699 mutex_init(&ic->i_recv_mutex);
698#ifndef KERNEL_HAS_ATOMIC64 700#ifndef KERNEL_HAS_ATOMIC64
699 spin_lock_init(&ic->i_ack_lock); 701 spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index ef3ab5b7283e..4b0da865a72c 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -187,11 +187,8 @@ void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
187 INIT_LIST_HEAD(list); 187 INIT_LIST_HEAD(list);
188 spin_unlock_irq(list_lock); 188 spin_unlock_irq(list_lock);
189 189
190 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) { 190 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
191 if (ic->conn->c_passive)
192 rds_conn_destroy(ic->conn->c_passive);
193 rds_conn_destroy(ic->conn); 191 rds_conn_destroy(ic->conn);
194 }
195} 192}
196 193
197struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) 194struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
@@ -573,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
573 spin_unlock_irqrestore(&pool->list_lock, flags); 570 spin_unlock_irqrestore(&pool->list_lock, flags);
574 571
575 /* If we've pinned too many pages, request a flush */ 572 /* If we've pinned too many pages, request a flush */
576 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned 573 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
577 || atomic_read(&pool->dirty_count) >= pool->max_items / 10) 574 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
578 queue_work(rds_wq, &pool->flush_worker); 575 queue_work(rds_wq, &pool->flush_worker);
579 576
580 if (invalidate) { 577 if (invalidate) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..04dc0d3f3c95 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
143 int ret = -ENOMEM; 143 int ret = -ENOMEM;
144 144
145 if (recv->r_ibinc == NULL) { 145 if (recv->r_ibinc == NULL) {
146 if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) { 146 if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
147 rds_ib_stats_inc(s_ib_rx_alloc_limit); 147 rds_ib_stats_inc(s_ib_rx_alloc_limit);
148 goto out; 148 goto out;
149 } 149 }
150 recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, 150 recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
151 kptr_gfp); 151 kptr_gfp);
152 if (recv->r_ibinc == NULL) 152 if (recv->r_ibinc == NULL) {
153 atomic_dec(&rds_ib_allocation);
153 goto out; 154 goto out;
154 atomic_inc(&rds_ib_allocation); 155 }
155 INIT_LIST_HEAD(&recv->r_ibinc->ii_frags); 156 INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
156 rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); 157 rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
157 } 158 }
@@ -229,8 +230,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
229 int ret = 0; 230 int ret = 0;
230 u32 pos; 231 u32 pos;
231 232
232 while ((prefill || rds_conn_up(conn)) 233 while ((prefill || rds_conn_up(conn)) &&
233 && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { 234 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
234 if (pos >= ic->i_recv_ring.w_nr) { 235 if (pos >= ic->i_recv_ring.w_nr) {
235 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", 236 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
236 pos); 237 pos);
@@ -770,10 +771,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
770 hdr = &ibinc->ii_inc.i_hdr; 771 hdr = &ibinc->ii_inc.i_hdr;
771 /* We can't just use memcmp here; fragments of a 772 /* We can't just use memcmp here; fragments of a
772 * single message may carry different ACKs */ 773 * single message may carry different ACKs */
773 if (hdr->h_sequence != ihdr->h_sequence 774 if (hdr->h_sequence != ihdr->h_sequence ||
774 || hdr->h_len != ihdr->h_len 775 hdr->h_len != ihdr->h_len ||
775 || hdr->h_sport != ihdr->h_sport 776 hdr->h_sport != ihdr->h_sport ||
776 || hdr->h_dport != ihdr->h_dport) { 777 hdr->h_dport != ihdr->h_dport) {
777 rds_ib_conn_error(conn, 778 rds_ib_conn_error(conn,
778 "fragment header mismatch; forcing reconnect\n"); 779 "fragment header mismatch; forcing reconnect\n");
779 return; 780 return;
@@ -824,17 +825,22 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
824{ 825{
825 struct rds_connection *conn = context; 826 struct rds_connection *conn = context;
826 struct rds_ib_connection *ic = conn->c_transport_data; 827 struct rds_ib_connection *ic = conn->c_transport_data;
827 struct ib_wc wc;
828 struct rds_ib_ack_state state = { 0, };
829 struct rds_ib_recv_work *recv;
830 828
831 rdsdebug("conn %p cq %p\n", conn, cq); 829 rdsdebug("conn %p cq %p\n", conn, cq);
832 830
833 rds_ib_stats_inc(s_ib_rx_cq_call); 831 rds_ib_stats_inc(s_ib_rx_cq_call);
834 832
835 ib_req_notify_cq(cq, IB_CQ_SOLICITED); 833 tasklet_schedule(&ic->i_recv_tasklet);
834}
835
836static inline void rds_poll_cq(struct rds_ib_connection *ic,
837 struct rds_ib_ack_state *state)
838{
839 struct rds_connection *conn = ic->conn;
840 struct ib_wc wc;
841 struct rds_ib_recv_work *recv;
836 842
837 while (ib_poll_cq(cq, 1, &wc) > 0) { 843 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
838 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", 844 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
839 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, 845 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
840 be32_to_cpu(wc.ex.imm_data)); 846 be32_to_cpu(wc.ex.imm_data));
@@ -852,7 +858,7 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
852 if (rds_conn_up(conn) || rds_conn_connecting(conn)) { 858 if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
853 /* We expect errors as the qp is drained during shutdown */ 859 /* We expect errors as the qp is drained during shutdown */
854 if (wc.status == IB_WC_SUCCESS) { 860 if (wc.status == IB_WC_SUCCESS) {
855 rds_ib_process_recv(conn, recv, wc.byte_len, &state); 861 rds_ib_process_recv(conn, recv, wc.byte_len, state);
856 } else { 862 } else {
857 rds_ib_conn_error(conn, "recv completion on " 863 rds_ib_conn_error(conn, "recv completion on "
858 "%pI4 had status %u, disconnecting and " 864 "%pI4 had status %u, disconnecting and "
@@ -863,6 +869,17 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
863 869
864 rds_ib_ring_free(&ic->i_recv_ring, 1); 870 rds_ib_ring_free(&ic->i_recv_ring, 1);
865 } 871 }
872}
873
874void rds_ib_recv_tasklet_fn(unsigned long data)
875{
876 struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
877 struct rds_connection *conn = ic->conn;
878 struct rds_ib_ack_state state = { 0, };
879
880 rds_poll_cq(ic, &state);
881 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
882 rds_poll_cq(ic, &state);
866 883
867 if (state.ack_next_valid) 884 if (state.ack_next_valid)
868 rds_ib_set_ack(ic, state.ack_next, state.ack_required); 885 rds_ib_set_ack(ic, state.ack_next, state.ack_required);
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 23bf830db2d5..a10fab6886d1 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -252,8 +252,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
252 252
253 rds_ib_ring_free(&ic->i_send_ring, completed); 253 rds_ib_ring_free(&ic->i_send_ring, completed);
254 254
255 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) 255 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
256 || test_bit(0, &conn->c_map_queued)) 256 test_bit(0, &conn->c_map_queued))
257 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 257 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
258 258
259 /* We expect errors as the qp is drained during shutdown */ 259 /* We expect errors as the qp is drained during shutdown */
diff --git a/net/rds/iw.h b/net/rds/iw.h
index dd72b62bd506..eef2f0c28476 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -119,6 +119,7 @@ struct rds_iw_connection {
119 struct rds_iw_send_work *i_sends; 119 struct rds_iw_send_work *i_sends;
120 120
121 /* rx */ 121 /* rx */
122 struct tasklet_struct i_recv_tasklet;
122 struct mutex i_recv_mutex; 123 struct mutex i_recv_mutex;
123 struct rds_iw_work_ring i_recv_ring; 124 struct rds_iw_work_ring i_recv_ring;
124 struct rds_iw_incoming *i_iwinc; 125 struct rds_iw_incoming *i_iwinc;
@@ -330,6 +331,7 @@ void rds_iw_inc_free(struct rds_incoming *inc);
330int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 331int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
331 size_t size); 332 size_t size);
332void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); 333void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
334void rds_iw_recv_tasklet_fn(unsigned long data);
333void rds_iw_recv_init_ring(struct rds_iw_connection *ic); 335void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
334void rds_iw_recv_clear_ring(struct rds_iw_connection *ic); 336void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
335void rds_iw_recv_init_ack(struct rds_iw_connection *ic); 337void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a416b0d492b1..394cf6b4d0aa 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -696,6 +696,8 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
696 return -ENOMEM; 696 return -ENOMEM;
697 697
698 INIT_LIST_HEAD(&ic->iw_node); 698 INIT_LIST_HEAD(&ic->iw_node);
699 tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn,
700 (unsigned long) ic);
699 mutex_init(&ic->i_recv_mutex); 701 mutex_init(&ic->i_recv_mutex);
700#ifndef KERNEL_HAS_ATOMIC64 702#ifndef KERNEL_HAS_ATOMIC64
701 spin_lock_init(&ic->i_ack_lock); 703 spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index de4a1b16bf7b..9eda11cca956 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -245,11 +245,8 @@ void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
245 INIT_LIST_HEAD(list); 245 INIT_LIST_HEAD(list);
246 spin_unlock_irq(list_lock); 246 spin_unlock_irq(list_lock);
247 247
248 list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) { 248 list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
249 if (ic->conn->c_passive)
250 rds_conn_destroy(ic->conn->c_passive);
251 rds_conn_destroy(ic->conn); 249 rds_conn_destroy(ic->conn);
252 }
253} 250}
254 251
255static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg, 252static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
@@ -576,8 +573,8 @@ void rds_iw_free_mr(void *trans_private, int invalidate)
576 rds_iw_free_fastreg(pool, ibmr); 573 rds_iw_free_fastreg(pool, ibmr);
577 574
578 /* If we've pinned too many pages, request a flush */ 575 /* If we've pinned too many pages, request a flush */
579 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned 576 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
580 || atomic_read(&pool->dirty_count) >= pool->max_items / 10) 577 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
581 queue_work(rds_wq, &pool->flush_worker); 578 queue_work(rds_wq, &pool->flush_worker);
582 579
583 if (invalidate) { 580 if (invalidate) {
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b..54af7d6b92da 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
143 int ret = -ENOMEM; 143 int ret = -ENOMEM;
144 144
145 if (recv->r_iwinc == NULL) { 145 if (recv->r_iwinc == NULL) {
146 if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) { 146 if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
147 rds_iw_stats_inc(s_iw_rx_alloc_limit); 147 rds_iw_stats_inc(s_iw_rx_alloc_limit);
148 goto out; 148 goto out;
149 } 149 }
150 recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, 150 recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
151 kptr_gfp); 151 kptr_gfp);
152 if (recv->r_iwinc == NULL) 152 if (recv->r_iwinc == NULL) {
153 atomic_dec(&rds_iw_allocation);
153 goto out; 154 goto out;
154 atomic_inc(&rds_iw_allocation); 155 }
155 INIT_LIST_HEAD(&recv->r_iwinc->ii_frags); 156 INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
156 rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); 157 rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
157 } 158 }
@@ -229,8 +230,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
229 int ret = 0; 230 int ret = 0;
230 u32 pos; 231 u32 pos;
231 232
232 while ((prefill || rds_conn_up(conn)) 233 while ((prefill || rds_conn_up(conn)) &&
233 && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) { 234 rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
234 if (pos >= ic->i_recv_ring.w_nr) { 235 if (pos >= ic->i_recv_ring.w_nr) {
235 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", 236 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
236 pos); 237 pos);
@@ -729,10 +730,10 @@ static void rds_iw_process_recv(struct rds_connection *conn,
729 hdr = &iwinc->ii_inc.i_hdr; 730 hdr = &iwinc->ii_inc.i_hdr;
730 /* We can't just use memcmp here; fragments of a 731 /* We can't just use memcmp here; fragments of a
731 * single message may carry different ACKs */ 732 * single message may carry different ACKs */
732 if (hdr->h_sequence != ihdr->h_sequence 733 if (hdr->h_sequence != ihdr->h_sequence ||
733 || hdr->h_len != ihdr->h_len 734 hdr->h_len != ihdr->h_len ||
734 || hdr->h_sport != ihdr->h_sport 735 hdr->h_sport != ihdr->h_sport ||
735 || hdr->h_dport != ihdr->h_dport) { 736 hdr->h_dport != ihdr->h_dport) {
736 rds_iw_conn_error(conn, 737 rds_iw_conn_error(conn,
737 "fragment header mismatch; forcing reconnect\n"); 738 "fragment header mismatch; forcing reconnect\n");
738 return; 739 return;
@@ -783,17 +784,22 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
783{ 784{
784 struct rds_connection *conn = context; 785 struct rds_connection *conn = context;
785 struct rds_iw_connection *ic = conn->c_transport_data; 786 struct rds_iw_connection *ic = conn->c_transport_data;
786 struct ib_wc wc;
787 struct rds_iw_ack_state state = { 0, };
788 struct rds_iw_recv_work *recv;
789 787
790 rdsdebug("conn %p cq %p\n", conn, cq); 788 rdsdebug("conn %p cq %p\n", conn, cq);
791 789
792 rds_iw_stats_inc(s_iw_rx_cq_call); 790 rds_iw_stats_inc(s_iw_rx_cq_call);
793 791
794 ib_req_notify_cq(cq, IB_CQ_SOLICITED); 792 tasklet_schedule(&ic->i_recv_tasklet);
793}
794
795static inline void rds_poll_cq(struct rds_iw_connection *ic,
796 struct rds_iw_ack_state *state)
797{
798 struct rds_connection *conn = ic->conn;
799 struct ib_wc wc;
800 struct rds_iw_recv_work *recv;
795 801
796 while (ib_poll_cq(cq, 1, &wc) > 0) { 802 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
797 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", 803 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
798 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, 804 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
799 be32_to_cpu(wc.ex.imm_data)); 805 be32_to_cpu(wc.ex.imm_data));
@@ -811,7 +817,7 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
811 if (rds_conn_up(conn) || rds_conn_connecting(conn)) { 817 if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
812 /* We expect errors as the qp is drained during shutdown */ 818 /* We expect errors as the qp is drained during shutdown */
813 if (wc.status == IB_WC_SUCCESS) { 819 if (wc.status == IB_WC_SUCCESS) {
814 rds_iw_process_recv(conn, recv, wc.byte_len, &state); 820 rds_iw_process_recv(conn, recv, wc.byte_len, state);
815 } else { 821 } else {
816 rds_iw_conn_error(conn, "recv completion on " 822 rds_iw_conn_error(conn, "recv completion on "
817 "%pI4 had status %u, disconnecting and " 823 "%pI4 had status %u, disconnecting and "
@@ -822,6 +828,17 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
822 828
823 rds_iw_ring_free(&ic->i_recv_ring, 1); 829 rds_iw_ring_free(&ic->i_recv_ring, 1);
824 } 830 }
831}
832
833void rds_iw_recv_tasklet_fn(unsigned long data)
834{
835 struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
836 struct rds_connection *conn = ic->conn;
837 struct rds_iw_ack_state state = { 0, };
838
839 rds_poll_cq(ic, &state);
840 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
841 rds_poll_cq(ic, &state);
825 842
826 if (state.ack_next_valid) 843 if (state.ack_next_valid)
827 rds_iw_set_ack(ic, state.ack_next, state.ack_required); 844 rds_iw_set_ack(ic, state.ack_next, state.ack_required);
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1f5abe3cf2b4..1379e9d66a78 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -288,8 +288,8 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
288 288
289 rds_iw_ring_free(&ic->i_send_ring, completed); 289 rds_iw_ring_free(&ic->i_send_ring, completed);
290 290
291 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) 291 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
292 || test_bit(0, &conn->c_map_queued)) 292 test_bit(0, &conn->c_map_queued))
293 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 293 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
294 294
295 /* We expect errors as the qp is drained during shutdown */ 295 /* We expect errors as the qp is drained during shutdown */
@@ -519,8 +519,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
519 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); 519 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
520 520
521 /* Fastreg support */ 521 /* Fastreg support */
522 if (rds_rdma_cookie_key(rm->m_rdma_cookie) 522 if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
523 && !ic->i_fastreg_posted) {
524 ret = -EAGAIN; 523 ret = -EAGAIN;
525 goto out; 524 goto out;
526 } 525 }
diff --git a/net/rds/message.c b/net/rds/message.c
index ca50a8ec9742..73e600ffd87f 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -122,8 +122,7 @@ int rds_message_add_extension(struct rds_header *hdr,
122 if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE) 122 if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
123 return 0; 123 return 0;
124 124
125 if (type >= __RDS_EXTHDR_MAX 125 if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
126 || len != rds_exthdr_size[type])
127 return 0; 126 return 0;
128 127
129 if (ext_len >= RDS_HEADER_EXT_SPACE) 128 if (ext_len >= RDS_HEADER_EXT_SPACE)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8dc83d2caa58..4c64daa1f5d5 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -317,6 +317,30 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
317 return __rds_rdma_map(rs, &args, NULL, NULL); 317 return __rds_rdma_map(rs, &args, NULL, NULL);
318} 318}
319 319
320int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
321{
322 struct rds_get_mr_for_dest_args args;
323 struct rds_get_mr_args new_args;
324
325 if (optlen != sizeof(struct rds_get_mr_for_dest_args))
326 return -EINVAL;
327
328 if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
329 sizeof(struct rds_get_mr_for_dest_args)))
330 return -EFAULT;
331
332 /*
333 * Initially, just behave like get_mr().
334 * TODO: Implement get_mr as wrapper around this
335 * and deprecate it.
336 */
337 new_args.vec = args.vec;
338 new_args.cookie_addr = args.cookie_addr;
339 new_args.flags = args.flags;
340
341 return __rds_rdma_map(rs, &new_args, NULL, NULL);
342}
343
320/* 344/*
321 * Free the MR indicated by the given R_Key 345 * Free the MR indicated by the given R_Key
322 */ 346 */
@@ -607,8 +631,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
607{ 631{
608 struct rds_rdma_op *op; 632 struct rds_rdma_op *op;
609 633
610 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) 634 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
611 || rm->m_rdma_op != NULL) 635 rm->m_rdma_op != NULL)
612 return -EINVAL; 636 return -EINVAL;
613 637
614 op = rds_rdma_prepare(rs, CMSG_DATA(cmsg)); 638 op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
@@ -631,8 +655,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
631 u32 r_key; 655 u32 r_key;
632 int err = 0; 656 int err = 0;
633 657
634 if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) 658 if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
635 || rm->m_rdma_cookie != 0) 659 rm->m_rdma_cookie != 0)
636 return -EINVAL; 660 return -EINVAL;
637 661
638 memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); 662 memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
@@ -668,8 +692,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
668int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, 692int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
669 struct cmsghdr *cmsg) 693 struct cmsghdr *cmsg)
670{ 694{
671 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) 695 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
672 || rm->m_rdma_cookie != 0) 696 rm->m_rdma_cookie != 0)
673 return -EINVAL; 697 return -EINVAL;
674 698
675 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr); 699 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
index 425512098b0b..909c39835a5d 100644
--- a/net/rds/rdma.h
+++ b/net/rds/rdma.h
@@ -61,6 +61,7 @@ static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
61} 61}
62 62
63int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen); 63int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
64int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
64int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen); 65int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
65void rds_rdma_drop_keys(struct rds_sock *rs); 66void rds_rdma_drop_keys(struct rds_sock *rs);
66int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, 67int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
diff --git a/net/rds/recv.c b/net/rds/recv.c
index fdff33c7b432..b426d67f760c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -195,8 +195,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
195 * XXX we could spend more on the wire to get more robust failure 195 * XXX we could spend more on the wire to get more robust failure
196 * detection, arguably worth it to avoid data corruption. 196 * detection, arguably worth it to avoid data corruption.
197 */ 197 */
198 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq 198 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
199 && (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { 199 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
200 rds_stats_inc(s_recv_drop_old_seq); 200 rds_stats_inc(s_recv_drop_old_seq);
201 goto out; 201 goto out;
202 } 202 }
@@ -432,10 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
432 } 432 }
433 433
434 timeo = wait_event_interruptible_timeout(*sk->sk_sleep, 434 timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
435 (!list_empty(&rs->rs_notify_queue) 435 (!list_empty(&rs->rs_notify_queue) ||
436 || rs->rs_cong_notify 436 rs->rs_cong_notify ||
437 || rds_next_incoming(rs, &inc)), 437 rds_next_incoming(rs, &inc)), timeo);
438 timeo);
439 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, 438 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
440 timeo); 439 timeo);
441 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) 440 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
diff --git a/net/rds/send.c b/net/rds/send.c
index 28c88ff3d038..b2fccfc20769 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -235,8 +235,8 @@ int rds_send_xmit(struct rds_connection *conn)
235 * connection. 235 * connection.
236 * Therefore, we never retransmit messages with RDMA ops. 236 * Therefore, we never retransmit messages with RDMA ops.
237 */ 237 */
238 if (rm->m_rdma_op 238 if (rm->m_rdma_op &&
239 && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { 239 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
240 spin_lock_irqsave(&conn->c_lock, flags); 240 spin_lock_irqsave(&conn->c_lock, flags);
241 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) 241 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
242 list_move(&rm->m_conn_item, &to_be_dropped); 242 list_move(&rm->m_conn_item, &to_be_dropped);
@@ -247,8 +247,8 @@ int rds_send_xmit(struct rds_connection *conn)
247 247
248 /* Require an ACK every once in a while */ 248 /* Require an ACK every once in a while */
249 len = ntohl(rm->m_inc.i_hdr.h_len); 249 len = ntohl(rm->m_inc.i_hdr.h_len);
250 if (conn->c_unacked_packets == 0 250 if (conn->c_unacked_packets == 0 ||
251 || conn->c_unacked_bytes < len) { 251 conn->c_unacked_bytes < len) {
252 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); 252 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
253 253
254 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; 254 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
@@ -418,8 +418,8 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
418 spin_lock(&rm->m_rs_lock); 418 spin_lock(&rm->m_rs_lock);
419 419
420 ro = rm->m_rdma_op; 420 ro = rm->m_rdma_op;
421 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) 421 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
422 && ro && ro->r_notify && ro->r_notifier) { 422 ro && ro->r_notify && ro->r_notifier) {
423 notifier = ro->r_notifier; 423 notifier = ro->r_notifier;
424 rs = rm->m_rs; 424 rs = rm->m_rs;
425 sock_hold(rds_rs_to_sk(rs)); 425 sock_hold(rds_rs_to_sk(rs));
@@ -549,8 +549,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
549 list_del_init(&rm->m_sock_item); 549 list_del_init(&rm->m_sock_item);
550 rds_send_sndbuf_remove(rs, rm); 550 rds_send_sndbuf_remove(rs, rm);
551 551
552 if (ro && ro->r_notifier 552 if (ro && ro->r_notifier && (status || ro->r_notify)) {
553 && (status || ro->r_notify)) {
554 notifier = ro->r_notifier; 553 notifier = ro->r_notifier;
555 list_add_tail(&notifier->n_list, 554 list_add_tail(&notifier->n_list,
556 &rs->rs_notify_queue); 555 &rs->rs_notify_queue);
@@ -877,8 +876,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
877 if (ret) 876 if (ret)
878 goto out; 877 goto out;
879 878
880 if ((rm->m_rdma_cookie || rm->m_rdma_op) 879 if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
881 && conn->c_trans->xmit_rdma == NULL) { 880 conn->c_trans->xmit_rdma == NULL) {
882 if (printk_ratelimit()) 881 if (printk_ratelimit())
883 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", 882 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
884 rm->m_rdma_op, conn->c_trans->xmit_rdma); 883 rm->m_rdma_op, conn->c_trans->xmit_rdma);
@@ -890,8 +889,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
890 * have scheduled a delayed reconnect however - in this case 889 * have scheduled a delayed reconnect however - in this case
891 * we should not interfere. 890 * we should not interfere.
892 */ 891 */
893 if (rds_conn_state(conn) == RDS_CONN_DOWN 892 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
894 && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) 893 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
895 queue_delayed_work(rds_wq, &conn->c_conn_w, 0); 894 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
896 895
897 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); 896 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -973,8 +972,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
973 * have scheduled a delayed reconnect however - in this case 972 * have scheduled a delayed reconnect however - in this case
974 * we should not interfere. 973 * we should not interfere.
975 */ 974 */
976 if (rds_conn_state(conn) == RDS_CONN_DOWN 975 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
977 && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) 976 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
978 queue_delayed_work(rds_wq, &conn->c_conn_w, 0); 977 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
979 978
980 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); 979 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index dd7e0cad1e7c..00fa10e59af8 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -170,8 +170,8 @@ void rds_shutdown_worker(struct work_struct *work)
170 * handler is supposed to check for state DISCONNECTING 170 * handler is supposed to check for state DISCONNECTING
171 */ 171 */
172 mutex_lock(&conn->c_cm_lock); 172 mutex_lock(&conn->c_cm_lock);
173 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) 173 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
174 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) { 174 !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
175 rds_conn_error(conn, "shutdown called in state %d\n", 175 rds_conn_error(conn, "shutdown called in state %d\n",
176 atomic_read(&conn->c_state)); 176 atomic_read(&conn->c_state));
177 mutex_unlock(&conn->c_cm_lock); 177 mutex_unlock(&conn->c_cm_lock);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ba2efb960c60..448e5a0fcc2e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -592,11 +592,13 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
592 return "wwan"; 592 return "wwan";
593 case RFKILL_TYPE_GPS: 593 case RFKILL_TYPE_GPS:
594 return "gps"; 594 return "gps";
595 case RFKILL_TYPE_FM:
596 return "fm";
595 default: 597 default:
596 BUG(); 598 BUG();
597 } 599 }
598 600
599 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_GPS + 1); 601 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
600} 602}
601 603
602static ssize_t rfkill_type_show(struct device *dev, 604static ssize_t rfkill_type_show(struct device *dev,
@@ -1189,6 +1191,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1189#endif 1191#endif
1190 1192
1191static const struct file_operations rfkill_fops = { 1193static const struct file_operations rfkill_fops = {
1194 .owner = THIS_MODULE,
1192 .open = rfkill_fop_open, 1195 .open = rfkill_fop_open,
1193 .read = rfkill_fop_read, 1196 .read = rfkill_fop_read,
1194 .write = rfkill_fop_write, 1197 .write = rfkill_fop_write,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c17734c2ce89..8feb9e5d6623 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -512,12 +512,13 @@ static struct proto rose_proto = {
512 .obj_size = sizeof(struct rose_sock), 512 .obj_size = sizeof(struct rose_sock),
513}; 513};
514 514
515static int rose_create(struct net *net, struct socket *sock, int protocol) 515static int rose_create(struct net *net, struct socket *sock, int protocol,
516 int kern)
516{ 517{
517 struct sock *sk; 518 struct sock *sk;
518 struct rose_sock *rose; 519 struct rose_sock *rose;
519 520
520 if (net != &init_net) 521 if (!net_eq(net, &init_net))
521 return -EAFNOSUPPORT; 522 return -EAFNOSUPPORT;
522 523
523 if (sock->type != SOCK_SEQPACKET || protocol != 0) 524 if (sock->type != SOCK_SEQPACKET || protocol != 0)
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 9478d9b3d977..795c4b025e31 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -77,8 +77,9 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
77 77
78 rose_neigh = rose_neigh_list; 78 rose_neigh = rose_neigh_list;
79 while (rose_neigh != NULL) { 79 while (rose_neigh != NULL) {
80 if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0 80 if (ax25cmp(&rose_route->neighbour,
81 && rose_neigh->dev == dev) 81 &rose_neigh->callsign) == 0 &&
82 rose_neigh->dev == dev)
82 break; 83 break;
83 rose_neigh = rose_neigh->next; 84 rose_neigh = rose_neigh->next;
84 } 85 }
@@ -311,8 +312,9 @@ static int rose_del_node(struct rose_route_struct *rose_route,
311 312
312 rose_neigh = rose_neigh_list; 313 rose_neigh = rose_neigh_list;
313 while (rose_neigh != NULL) { 314 while (rose_neigh != NULL) {
314 if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0 315 if (ax25cmp(&rose_route->neighbour,
315 && rose_neigh->dev == dev) 316 &rose_neigh->callsign) == 0 &&
317 rose_neigh->dev == dev)
316 break; 318 break;
317 rose_neigh = rose_neigh->next; 319 rose_neigh = rose_neigh->next;
318 } 320 }
@@ -578,18 +580,18 @@ static int rose_clear_routes(void)
578 580
579/* 581/*
580 * Check that the device given is a valid AX.25 interface that is "up". 582 * Check that the device given is a valid AX.25 interface that is "up".
583 * called whith RTNL
581 */ 584 */
582static struct net_device *rose_ax25_dev_get(char *devname) 585static struct net_device *rose_ax25_dev_find(char *devname)
583{ 586{
584 struct net_device *dev; 587 struct net_device *dev;
585 588
586 if ((dev = dev_get_by_name(&init_net, devname)) == NULL) 589 if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
587 return NULL; 590 return NULL;
588 591
589 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) 592 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
590 return dev; 593 return dev;
591 594
592 dev_put(dev);
593 return NULL; 595 return NULL;
594} 596}
595 597
@@ -600,13 +602,13 @@ struct net_device *rose_dev_first(void)
600{ 602{
601 struct net_device *dev, *first = NULL; 603 struct net_device *dev, *first = NULL;
602 604
603 read_lock(&dev_base_lock); 605 rcu_read_lock();
604 for_each_netdev(&init_net, dev) { 606 for_each_netdev_rcu(&init_net, dev) {
605 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) 607 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
606 if (first == NULL || strncmp(dev->name, first->name, 3) < 0) 608 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
607 first = dev; 609 first = dev;
608 } 610 }
609 read_unlock(&dev_base_lock); 611 rcu_read_unlock();
610 612
611 return first; 613 return first;
612} 614}
@@ -618,8 +620,8 @@ struct net_device *rose_dev_get(rose_address *addr)
618{ 620{
619 struct net_device *dev; 621 struct net_device *dev;
620 622
621 read_lock(&dev_base_lock); 623 rcu_read_lock();
622 for_each_netdev(&init_net, dev) { 624 for_each_netdev_rcu(&init_net, dev) {
623 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { 625 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
624 dev_hold(dev); 626 dev_hold(dev);
625 goto out; 627 goto out;
@@ -627,7 +629,7 @@ struct net_device *rose_dev_get(rose_address *addr)
627 } 629 }
628 dev = NULL; 630 dev = NULL;
629out: 631out:
630 read_unlock(&dev_base_lock); 632 rcu_read_unlock();
631 return dev; 633 return dev;
632} 634}
633 635
@@ -635,14 +637,14 @@ static int rose_dev_exists(rose_address *addr)
635{ 637{
636 struct net_device *dev; 638 struct net_device *dev;
637 639
638 read_lock(&dev_base_lock); 640 rcu_read_lock();
639 for_each_netdev(&init_net, dev) { 641 for_each_netdev_rcu(&init_net, dev) {
640 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) 642 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
641 goto out; 643 goto out;
642 } 644 }
643 dev = NULL; 645 dev = NULL;
644out: 646out:
645 read_unlock(&dev_base_lock); 647 rcu_read_unlock();
646 return dev != NULL; 648 return dev != NULL;
647} 649}
648 650
@@ -720,27 +722,23 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
720 case SIOCADDRT: 722 case SIOCADDRT:
721 if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct))) 723 if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
722 return -EFAULT; 724 return -EFAULT;
723 if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL) 725 if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
724 return -EINVAL; 726 return -EINVAL;
725 if (rose_dev_exists(&rose_route.address)) { /* Can't add routes to ourself */ 727 if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
726 dev_put(dev);
727 return -EINVAL; 728 return -EINVAL;
728 }
729 if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ 729 if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
730 return -EINVAL; 730 return -EINVAL;
731 if (rose_route.ndigis > AX25_MAX_DIGIS) 731 if (rose_route.ndigis > AX25_MAX_DIGIS)
732 return -EINVAL; 732 return -EINVAL;
733 err = rose_add_node(&rose_route, dev); 733 err = rose_add_node(&rose_route, dev);
734 dev_put(dev);
735 return err; 734 return err;
736 735
737 case SIOCDELRT: 736 case SIOCDELRT:
738 if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct))) 737 if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
739 return -EFAULT; 738 return -EFAULT;
740 if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL) 739 if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
741 return -EINVAL; 740 return -EINVAL;
742 err = rose_del_node(&rose_route, dev); 741 err = rose_del_node(&rose_route, dev);
743 dev_put(dev);
744 return err; 742 return err;
745 743
746 case SIOCRSCLRRT: 744 case SIOCRSCLRRT:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 6817c9781ef3..287b1415cee9 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -608,14 +608,15 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
608/* 608/*
609 * create an RxRPC socket 609 * create an RxRPC socket
610 */ 610 */
611static int rxrpc_create(struct net *net, struct socket *sock, int protocol) 611static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
612 int kern)
612{ 613{
613 struct rxrpc_sock *rx; 614 struct rxrpc_sock *rx;
614 struct sock *sk; 615 struct sock *sk;
615 616
616 _enter("%p,%d", sock, protocol); 617 _enter("%p,%d", sock, protocol);
617 618
618 if (net != &init_net) 619 if (!net_eq(net, &init_net))
619 return -EAFNOSUPPORT; 620 return -EAFNOSUPPORT;
620 621
621 /* we support transport protocol UDP only */ 622 /* we support transport protocol UDP only */
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index ca2e1fd2bf69..2a740035aa6b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -969,7 +969,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
969 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 969 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
970 int ret = 0, ovr = 0; 970 int ret = 0, ovr = 0;
971 971
972 if (net != &init_net) 972 if (!net_eq(net, &init_net))
973 return -EINVAL; 973 return -EINVAL;
974 974
975 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); 975 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1052,7 +1052,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1052 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 1052 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
1053 struct nlattr *kind = find_dump_kind(cb->nlh); 1053 struct nlattr *kind = find_dump_kind(cb->nlh);
1054 1054
1055 if (net != &init_net) 1055 if (!net_eq(net, &init_net))
1056 return 0; 1056 return 0;
1057 1057
1058 if (kind == NULL) { 1058 if (kind == NULL) {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b9aaab4e0354..d329170243cb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -65,48 +65,53 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
65 struct tc_mirred *parm; 65 struct tc_mirred *parm;
66 struct tcf_mirred *m; 66 struct tcf_mirred *m;
67 struct tcf_common *pc; 67 struct tcf_common *pc;
68 struct net_device *dev = NULL; 68 struct net_device *dev;
69 int ret = 0, err; 69 int ret, ok_push = 0;
70 int ok_push = 0;
71 70
72 if (nla == NULL) 71 if (nla == NULL)
73 return -EINVAL; 72 return -EINVAL;
74 73 ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
75 err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); 74 if (ret < 0)
76 if (err < 0) 75 return ret;
77 return err;
78
79 if (tb[TCA_MIRRED_PARMS] == NULL) 76 if (tb[TCA_MIRRED_PARMS] == NULL)
80 return -EINVAL; 77 return -EINVAL;
81 parm = nla_data(tb[TCA_MIRRED_PARMS]); 78 parm = nla_data(tb[TCA_MIRRED_PARMS]);
82 79 switch (parm->eaction) {
80 case TCA_EGRESS_MIRROR:
81 case TCA_EGRESS_REDIR:
82 break;
83 default:
84 return -EINVAL;
85 }
83 if (parm->ifindex) { 86 if (parm->ifindex) {
84 dev = __dev_get_by_index(&init_net, parm->ifindex); 87 dev = __dev_get_by_index(&init_net, parm->ifindex);
85 if (dev == NULL) 88 if (dev == NULL)
86 return -ENODEV; 89 return -ENODEV;
87 switch (dev->type) { 90 switch (dev->type) {
88 case ARPHRD_TUNNEL: 91 case ARPHRD_TUNNEL:
89 case ARPHRD_TUNNEL6: 92 case ARPHRD_TUNNEL6:
90 case ARPHRD_SIT: 93 case ARPHRD_SIT:
91 case ARPHRD_IPGRE: 94 case ARPHRD_IPGRE:
92 case ARPHRD_VOID: 95 case ARPHRD_VOID:
93 case ARPHRD_NONE: 96 case ARPHRD_NONE:
94 ok_push = 0; 97 ok_push = 0;
95 break; 98 break;
96 default: 99 default:
97 ok_push = 1; 100 ok_push = 1;
98 break; 101 break;
99 } 102 }
103 } else {
104 dev = NULL;
100 } 105 }
101 106
102 pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); 107 pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
103 if (!pc) { 108 if (!pc) {
104 if (!parm->ifindex) 109 if (dev == NULL)
105 return -EINVAL; 110 return -EINVAL;
106 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, 111 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
107 &mirred_idx_gen, &mirred_hash_info); 112 &mirred_idx_gen, &mirred_hash_info);
108 if (IS_ERR(pc)) 113 if (IS_ERR(pc))
109 return PTR_ERR(pc); 114 return PTR_ERR(pc);
110 ret = ACT_P_CREATED; 115 ret = ACT_P_CREATED;
111 } else { 116 } else {
112 if (!ovr) { 117 if (!ovr) {
@@ -119,12 +124,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
119 spin_lock_bh(&m->tcf_lock); 124 spin_lock_bh(&m->tcf_lock);
120 m->tcf_action = parm->action; 125 m->tcf_action = parm->action;
121 m->tcfm_eaction = parm->eaction; 126 m->tcfm_eaction = parm->eaction;
122 if (parm->ifindex) { 127 if (dev != NULL) {
123 m->tcfm_ifindex = parm->ifindex; 128 m->tcfm_ifindex = parm->ifindex;
124 if (ret != ACT_P_CREATED) 129 if (ret != ACT_P_CREATED)
125 dev_put(m->tcfm_dev); 130 dev_put(m->tcfm_dev);
126 m->tcfm_dev = dev;
127 dev_hold(dev); 131 dev_hold(dev);
132 m->tcfm_dev = dev;
128 m->tcfm_ok_push = ok_push; 133 m->tcfm_ok_push = ok_push;
129 } 134 }
130 spin_unlock_bh(&m->tcf_lock); 135 spin_unlock_bh(&m->tcf_lock);
@@ -148,57 +153,57 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
148{ 153{
149 struct tcf_mirred *m = a->priv; 154 struct tcf_mirred *m = a->priv;
150 struct net_device *dev; 155 struct net_device *dev;
151 struct sk_buff *skb2 = NULL; 156 struct sk_buff *skb2;
152 u32 at = G_TC_AT(skb->tc_verd); 157 u32 at;
158 int retval, err = 1;
153 159
154 spin_lock(&m->tcf_lock); 160 spin_lock(&m->tcf_lock);
155
156 dev = m->tcfm_dev;
157 m->tcf_tm.lastuse = jiffies; 161 m->tcf_tm.lastuse = jiffies;
158 162
159 if (!(dev->flags&IFF_UP) ) { 163 dev = m->tcfm_dev;
164 if (!(dev->flags & IFF_UP)) {
160 if (net_ratelimit()) 165 if (net_ratelimit())
161 printk("mirred to Houston: device %s is gone!\n", 166 printk("mirred to Houston: device %s is gone!\n",
162 dev->name); 167 dev->name);
163bad_mirred: 168 goto out;
164 if (skb2 != NULL)
165 kfree_skb(skb2);
166 m->tcf_qstats.overlimits++;
167 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
168 m->tcf_bstats.packets++;
169 spin_unlock(&m->tcf_lock);
170 /* should we be asking for packet to be dropped?
171 * may make sense for redirect case only
172 */
173 return TC_ACT_SHOT;
174 } 169 }
175 170
176 skb2 = skb_act_clone(skb, GFP_ATOMIC); 171 skb2 = skb_act_clone(skb, GFP_ATOMIC);
177 if (skb2 == NULL) 172 if (skb2 == NULL)
178 goto bad_mirred; 173 goto out;
179 if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
180 m->tcfm_eaction != TCA_EGRESS_REDIR) {
181 if (net_ratelimit())
182 printk("tcf_mirred unknown action %d\n",
183 m->tcfm_eaction);
184 goto bad_mirred;
185 }
186 174
187 m->tcf_bstats.bytes += qdisc_pkt_len(skb2); 175 m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
188 m->tcf_bstats.packets++; 176 m->tcf_bstats.packets++;
189 if (!(at & AT_EGRESS)) 177 at = G_TC_AT(skb->tc_verd);
178 if (!(at & AT_EGRESS)) {
190 if (m->tcfm_ok_push) 179 if (m->tcfm_ok_push)
191 skb_push(skb2, skb2->dev->hard_header_len); 180 skb_push(skb2, skb2->dev->hard_header_len);
181 }
192 182
193 /* mirror is always swallowed */ 183 /* mirror is always swallowed */
194 if (m->tcfm_eaction != TCA_EGRESS_MIRROR) 184 if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
195 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); 185 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
196 186
197 skb2->dev = dev; 187 skb2->dev = dev;
198 skb2->iif = skb->dev->ifindex; 188 skb2->skb_iif = skb->dev->ifindex;
199 dev_queue_xmit(skb2); 189 dev_queue_xmit(skb2);
190 err = 0;
191
192out:
193 if (err) {
194 m->tcf_qstats.overlimits++;
195 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
196 m->tcf_bstats.packets++;
197 /* should we be asking for packet to be dropped?
198 * may make sense for redirect case only
199 */
200 retval = TC_ACT_SHOT;
201 } else {
202 retval = m->tcf_action;
203 }
200 spin_unlock(&m->tcf_lock); 204 spin_unlock(&m->tcf_lock);
201 return m->tcf_action; 205
206 return retval;
202} 207}
203 208
204static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 209static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7cf6c0fbc7a6..3725d8fa29db 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -137,7 +137,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
137 int err; 137 int err;
138 int tp_created = 0; 138 int tp_created = 0;
139 139
140 if (net != &init_net) 140 if (!net_eq(net, &init_net))
141 return -EINVAL; 141 return -EINVAL;
142 142
143replay: 143replay:
@@ -404,6 +404,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
404 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER); 404 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
405} 405}
406 406
407/* called with RTNL */
407static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 408static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
408{ 409{
409 struct net *net = sock_net(skb->sk); 410 struct net *net = sock_net(skb->sk);
@@ -417,12 +418,12 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
417 const struct Qdisc_class_ops *cops; 418 const struct Qdisc_class_ops *cops;
418 struct tcf_dump_args arg; 419 struct tcf_dump_args arg;
419 420
420 if (net != &init_net) 421 if (!net_eq(net, &init_net))
421 return 0; 422 return 0;
422 423
423 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 424 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
424 return skb->len; 425 return skb->len;
425 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 426 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
426 return skb->len; 427 return skb->len;
427 428
428 if (!tcm->tcm_parent) 429 if (!tcm->tcm_parent)
@@ -484,7 +485,6 @@ errout:
484 if (cl) 485 if (cl)
485 cops->put(q, cl); 486 cops->put(q, cl);
486out: 487out:
487 dev_put(dev);
488 return skb->len; 488 return skb->len;
489} 489}
490 490
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 9402a7fd3785..e054c62857e1 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -171,7 +171,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
171 171
172static u32 flow_get_iif(const struct sk_buff *skb) 172static u32 flow_get_iif(const struct sk_buff *skb)
173{ 173{
174 return skb->iif; 174 return skb->skb_iif;
175} 175}
176 176
177static u32 flow_get_priority(const struct sk_buff *skb) 177static u32 flow_get_priority(const struct sk_buff *skb)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 7034ea4530e5..dd9414e44200 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -170,21 +170,23 @@ restart:
170 for (s = sht[h1]; s; s = s->next) { 170 for (s = sht[h1]; s; s = s->next) {
171 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && 171 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
172 protocol == s->protocol && 172 protocol == s->protocol &&
173 !(s->dpi.mask & (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) 173 !(s->dpi.mask &
174 (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
174#if RSVP_DST_LEN == 4 175#if RSVP_DST_LEN == 4
175 && dst[0] == s->dst[0] 176 dst[0] == s->dst[0] &&
176 && dst[1] == s->dst[1] 177 dst[1] == s->dst[1] &&
177 && dst[2] == s->dst[2] 178 dst[2] == s->dst[2] &&
178#endif 179#endif
179 && tunnelid == s->tunnelid) { 180 tunnelid == s->tunnelid) {
180 181
181 for (f = s->ht[h2]; f; f = f->next) { 182 for (f = s->ht[h2]; f; f = f->next) {
182 if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] && 183 if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
183 !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key)) 184 !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
184#if RSVP_DST_LEN == 4 185#if RSVP_DST_LEN == 4
185 && src[0] == f->src[0] 186 &&
186 && src[1] == f->src[1] 187 src[0] == f->src[0] &&
187 && src[2] == f->src[2] 188 src[1] == f->src[1] &&
189 src[2] == f->src[2]
188#endif 190#endif
189 ) { 191 ) {
190 *res = f->res; 192 *res = f->res;
@@ -493,13 +495,13 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
493 for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) { 495 for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
494 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && 496 if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
495 pinfo && pinfo->protocol == s->protocol && 497 pinfo && pinfo->protocol == s->protocol &&
496 memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 498 memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
497#if RSVP_DST_LEN == 4 499#if RSVP_DST_LEN == 4
498 && dst[0] == s->dst[0] 500 dst[0] == s->dst[0] &&
499 && dst[1] == s->dst[1] 501 dst[1] == s->dst[1] &&
500 && dst[2] == s->dst[2] 502 dst[2] == s->dst[2] &&
501#endif 503#endif
502 && pinfo->tunnelid == s->tunnelid) { 504 pinfo->tunnelid == s->tunnelid) {
503 505
504insert: 506insert:
505 /* OK, we found appropriate session */ 507 /* OK, we found appropriate session */
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 18d85d259104..24dce8b648a4 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -303,17 +303,18 @@ META_COLLECTOR(var_sk_bound_if)
303{ 303{
304 SKIP_NONLOCAL(skb); 304 SKIP_NONLOCAL(skb);
305 305
306 if (skb->sk->sk_bound_dev_if == 0) { 306 if (skb->sk->sk_bound_dev_if == 0) {
307 dst->value = (unsigned long) "any"; 307 dst->value = (unsigned long) "any";
308 dst->len = 3; 308 dst->len = 3;
309 } else { 309 } else {
310 struct net_device *dev; 310 struct net_device *dev;
311 311
312 dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if); 312 rcu_read_lock();
313 dev = dev_get_by_index_rcu(sock_net(skb->sk),
314 skb->sk->sk_bound_dev_if);
313 *err = var_dev(dev, dst); 315 *err = var_dev(dev, dst);
314 if (dev) 316 rcu_read_unlock();
315 dev_put(dev); 317 }
316 }
317} 318}
318 319
319META_COLLECTOR(int_sk_refcnt) 320META_COLLECTOR(int_sk_refcnt)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1acfd29cc826..75fd1c672c61 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -947,7 +947,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
947 struct Qdisc *p = NULL; 947 struct Qdisc *p = NULL;
948 int err; 948 int err;
949 949
950 if (net != &init_net) 950 if (!net_eq(net, &init_net))
951 return -EINVAL; 951 return -EINVAL;
952 952
953 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 953 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1009,7 +1009,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1009 struct Qdisc *q, *p; 1009 struct Qdisc *q, *p;
1010 int err; 1010 int err;
1011 1011
1012 if (net != &init_net) 1012 if (!net_eq(net, &init_net))
1013 return -EINVAL; 1013 return -EINVAL;
1014 1014
1015replay: 1015replay:
@@ -1274,14 +1274,15 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1274 int s_idx, s_q_idx; 1274 int s_idx, s_q_idx;
1275 struct net_device *dev; 1275 struct net_device *dev;
1276 1276
1277 if (net != &init_net) 1277 if (!net_eq(net, &init_net))
1278 return 0; 1278 return 0;
1279 1279
1280 s_idx = cb->args[0]; 1280 s_idx = cb->args[0];
1281 s_q_idx = q_idx = cb->args[1]; 1281 s_q_idx = q_idx = cb->args[1];
1282 read_lock(&dev_base_lock); 1282
1283 rcu_read_lock();
1283 idx = 0; 1284 idx = 0;
1284 for_each_netdev(&init_net, dev) { 1285 for_each_netdev_rcu(&init_net, dev) {
1285 struct netdev_queue *dev_queue; 1286 struct netdev_queue *dev_queue;
1286 1287
1287 if (idx < s_idx) 1288 if (idx < s_idx)
@@ -1302,7 +1303,7 @@ cont:
1302 } 1303 }
1303 1304
1304done: 1305done:
1305 read_unlock(&dev_base_lock); 1306 rcu_read_unlock();
1306 1307
1307 cb->args[0] = idx; 1308 cb->args[0] = idx;
1308 cb->args[1] = q_idx; 1309 cb->args[1] = q_idx;
@@ -1333,7 +1334,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1333 u32 qid = TC_H_MAJ(clid); 1334 u32 qid = TC_H_MAJ(clid);
1334 int err; 1335 int err;
1335 1336
1336 if (net != &init_net) 1337 if (!net_eq(net, &init_net))
1337 return -EINVAL; 1338 return -EINVAL;
1338 1339
1339 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 1340 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1575,7 +1576,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1575 struct net_device *dev; 1576 struct net_device *dev;
1576 int t, s_t; 1577 int t, s_t;
1577 1578
1578 if (net != &init_net) 1579 if (!net_eq(net, &init_net))
1579 return 0; 1580 return 0;
1580 1581
1581 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 1582 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ae6aa562f2b..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,32 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
119 spin_unlock(root_lock); 119 spin_unlock(root_lock);
120 120
121 HARD_TX_LOCK(dev, txq, smp_processor_id()); 121 HARD_TX_LOCK(dev, txq, smp_processor_id());
122 if (!netif_tx_queue_stopped(txq) && 122 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
123 !netif_tx_queue_frozen(txq))
124 ret = dev_hard_start_xmit(skb, dev, txq); 123 ret = dev_hard_start_xmit(skb, dev, txq);
124
125 HARD_TX_UNLOCK(dev, txq); 125 HARD_TX_UNLOCK(dev, txq);
126 126
127 spin_lock(root_lock); 127 spin_lock(root_lock);
128 128
129 switch (ret) { 129 if (dev_xmit_complete(ret)) {
130 case NETDEV_TX_OK: 130 /* Driver sent out skb successfully or skb was consumed */
131 /* Driver sent out skb successfully */
132 ret = qdisc_qlen(q); 131 ret = qdisc_qlen(q);
133 break; 132 } else if (ret == NETDEV_TX_LOCKED) {
134
135 case NETDEV_TX_LOCKED:
136 /* Driver try lock failed */ 133 /* Driver try lock failed */
137 ret = handle_dev_cpu_collision(skb, txq, q); 134 ret = handle_dev_cpu_collision(skb, txq, q);
138 break; 135 } else {
139
140 default:
141 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 136 /* Driver returned NETDEV_TX_BUSY - requeue skb */
142 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) 137 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
143 printk(KERN_WARNING "BUG %s code %d qlen %d\n", 138 printk(KERN_WARNING "BUG %s code %d qlen %d\n",
144 dev->name, ret, q->q.qlen); 139 dev->name, ret, q->q.qlen);
145 140
146 ret = dev_requeue_skb(skb, q); 141 ret = dev_requeue_skb(skb, q);
147 break;
148 } 142 }
149 143
150 if (ret && (netif_tx_queue_stopped(txq) || 144 if (ret && (netif_tx_queue_stopped(txq) ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2e38d1abd830..508cf5f3a6d5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1344,8 +1344,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1344 }; 1344 };
1345 1345
1346 /* check for valid classid */ 1346 /* check for valid classid */
1347 if (!classid || TC_H_MAJ(classid ^ sch->handle) 1347 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1348 || htb_find(classid, sch)) 1348 htb_find(classid, sch))
1349 goto failure; 1349 goto failure;
1350 1350
1351 /* check maximal depth */ 1351 /* check maximal depth */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2b88295cb7b7..d8b10e054627 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -199,9 +199,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
199 * do it now in software before we mangle it. 199 * do it now in software before we mangle it.
200 */ 200 */
201 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 201 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
202 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) 202 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
203 || (skb->ip_summed == CHECKSUM_PARTIAL 203 (skb->ip_summed == CHECKSUM_PARTIAL &&
204 && skb_checksum_help(skb))) { 204 skb_checksum_help(skb))) {
205 sch->qstats.drops++; 205 sch->qstats.drops++;
206 return NET_XMIT_DROP; 206 return NET_XMIT_DROP;
207 } 207 }
@@ -210,9 +210,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
210 } 210 }
211 211
212 cb = netem_skb_cb(skb); 212 cb = netem_skb_cb(skb);
213 if (q->gap == 0 /* not doing reordering */ 213 if (q->gap == 0 || /* not doing reordering */
214 || q->counter < q->gap /* inside last reordering gap */ 214 q->counter < q->gap || /* inside last reordering gap */
215 || q->reorder < get_crandom(&q->reorder_cor)) { 215 q->reorder < get_crandom(&q->reorder_cor)) {
216 psched_time_t now; 216 psched_time_t now;
217 psched_tdiff_t delay; 217 psched_tdiff_t delay;
218 218
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 5a002c247231..db69637069c4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -190,10 +190,13 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
190 190
191 if (m->slaves) { 191 if (m->slaves) {
192 if (m->dev->flags & IFF_UP) { 192 if (m->dev->flags & IFF_UP) {
193 if ((m->dev->flags&IFF_POINTOPOINT && !(dev->flags&IFF_POINTOPOINT)) 193 if ((m->dev->flags & IFF_POINTOPOINT &&
194 || (m->dev->flags&IFF_BROADCAST && !(dev->flags&IFF_BROADCAST)) 194 !(dev->flags & IFF_POINTOPOINT)) ||
195 || (m->dev->flags&IFF_MULTICAST && !(dev->flags&IFF_MULTICAST)) 195 (m->dev->flags & IFF_BROADCAST &&
196 || dev->mtu < m->dev->mtu) 196 !(dev->flags & IFF_BROADCAST)) ||
197 (m->dev->flags & IFF_MULTICAST &&
198 !(dev->flags & IFF_MULTICAST)) ||
199 dev->mtu < m->dev->mtu)
197 return -EINVAL; 200 return -EINVAL;
198 } else { 201 } else {
199 if (!(dev->flags&IFF_POINTOPOINT)) 202 if (!(dev->flags&IFF_POINTOPOINT))
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8450960df24f..df5abbff63e2 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -63,6 +63,12 @@
63static void sctp_assoc_bh_rcv(struct work_struct *work); 63static void sctp_assoc_bh_rcv(struct work_struct *work);
64static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 64static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
65 65
66/* Keep track of the new idr low so that we don't re-use association id
67 * numbers too fast. It is protected by they idr spin lock is in the
68 * range of 1 - INT_MAX.
69 */
70static u32 idr_low = 1;
71
66 72
67/* 1st Level Abstractions. */ 73/* 1st Level Abstractions. */
68 74
@@ -167,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
167 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 173 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
168 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; 174 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
169 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 175 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
170 sp->autoclose * HZ; 176 (unsigned long)sp->autoclose * HZ;
171 177
172 /* Initilizes the timers */ 178 /* Initilizes the timers */
173 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 179 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -512,7 +518,13 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
512 * to this destination address earlier. The sender MUST set 518 * to this destination address earlier. The sender MUST set
513 * CYCLING_CHANGEOVER to indicate that this switch is a 519 * CYCLING_CHANGEOVER to indicate that this switch is a
514 * double switch to the same destination address. 520 * double switch to the same destination address.
521 *
522 * Really, only bother is we have data queued or outstanding on
523 * the association.
515 */ 524 */
525 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
526 return;
527
516 if (transport->cacc.changeover_active) 528 if (transport->cacc.changeover_active)
517 transport->cacc.cycling_changeover = changeover; 529 transport->cacc.cycling_changeover = changeover;
518 530
@@ -732,6 +744,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
732 744
733 peer->partial_bytes_acked = 0; 745 peer->partial_bytes_acked = 0;
734 peer->flight_size = 0; 746 peer->flight_size = 0;
747 peer->burst_limited = 0;
735 748
736 /* Set the transport's RTO.initial value */ 749 /* Set the transport's RTO.initial value */
737 peer->rto = asoc->rto_initial; 750 peer->rto = asoc->rto_initial;
@@ -1377,8 +1390,9 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1377 case SCTP_STATE_SHUTDOWN_RECEIVED: 1390 case SCTP_STATE_SHUTDOWN_RECEIVED:
1378 case SCTP_STATE_SHUTDOWN_SENT: 1391 case SCTP_STATE_SHUTDOWN_SENT:
1379 if ((asoc->rwnd > asoc->a_rwnd) && 1392 if ((asoc->rwnd > asoc->a_rwnd) &&
1380 ((asoc->rwnd - asoc->a_rwnd) >= 1393 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1381 min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu))) 1394 (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
1395 asoc->pathmtu)))
1382 return 1; 1396 return 1;
1383 break; 1397 break;
1384 default: 1398 default:
@@ -1485,15 +1499,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1485 * local endpoint and the remote peer. 1499 * local endpoint and the remote peer.
1486 */ 1500 */
1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1501int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1488 gfp_t gfp) 1502 sctp_scope_t scope, gfp_t gfp)
1489{ 1503{
1490 sctp_scope_t scope;
1491 int flags; 1504 int flags;
1492 1505
1493 /* Use scoping rules to determine the subset of addresses from 1506 /* Use scoping rules to determine the subset of addresses from
1494 * the endpoint. 1507 * the endpoint.
1495 */ 1508 */
1496 scope = sctp_scope(&asoc->peer.active_path->ipaddr);
1497 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; 1509 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1498 if (asoc->peer.ipv4_address) 1510 if (asoc->peer.ipv4_address)
1499 flags |= SCTP_ADDR4_PEERSUPP; 1511 flags |= SCTP_ADDR4_PEERSUPP;
@@ -1547,7 +1559,12 @@ retry:
1547 1559
1548 spin_lock_bh(&sctp_assocs_id_lock); 1560 spin_lock_bh(&sctp_assocs_id_lock);
1549 error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1561 error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
1550 1, &assoc_id); 1562 idr_low, &assoc_id);
1563 if (!error) {
1564 idr_low = assoc_id + 1;
1565 if (idr_low == INT_MAX)
1566 idr_low = 1;
1567 }
1551 spin_unlock_bh(&sctp_assocs_id_lock); 1568 spin_unlock_bh(&sctp_assocs_id_lock);
1552 if (error == -EAGAIN) 1569 if (error == -EAGAIN)
1553 goto retry; 1570 goto retry;
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index acf7c4d128f7..8e4320040f05 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -263,9 +263,18 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
263 if (0 == i) 263 if (0 == i)
264 frag |= SCTP_DATA_FIRST_FRAG; 264 frag |= SCTP_DATA_FIRST_FRAG;
265 265
266 if ((i == (whole - 1)) && !over) 266 if ((i == (whole - 1)) && !over) {
267 frag |= SCTP_DATA_LAST_FRAG; 267 frag |= SCTP_DATA_LAST_FRAG;
268 268
269 /* The application requests to set the I-bit of the
270 * last DATA chunk of a user message when providing
271 * the user message to the SCTP implementation.
272 */
273 if ((sinfo->sinfo_flags & SCTP_EOF) ||
274 (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
275 frag |= SCTP_DATA_SACK_IMM;
276 }
277
269 chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); 278 chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
270 279
271 if (!chunk) 280 if (!chunk)
@@ -297,6 +306,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
297 else 306 else
298 frag = SCTP_DATA_LAST_FRAG; 307 frag = SCTP_DATA_LAST_FRAG;
299 308
309 if ((sinfo->sinfo_flags & SCTP_EOF) ||
310 (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
311 frag |= SCTP_DATA_SACK_IMM;
312
300 chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); 313 chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
301 314
302 if (!chunk) 315 if (!chunk)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bb280e60e00a..cc50fbe99291 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -837,15 +837,16 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
837 if (type & IPV6_ADDR_LINKLOCAL) { 837 if (type & IPV6_ADDR_LINKLOCAL) {
838 if (!addr->v6.sin6_scope_id) 838 if (!addr->v6.sin6_scope_id)
839 return 0; 839 return 0;
840 dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); 840 rcu_read_lock();
841 if (!dev) 841 dev = dev_get_by_index_rcu(&init_net,
842 return 0; 842 addr->v6.sin6_scope_id);
843 if (!ipv6_chk_addr(&init_net, &addr->v6.sin6_addr, 843 if (!dev ||
844 !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
844 dev, 0)) { 845 dev, 0)) {
845 dev_put(dev); 846 rcu_read_unlock();
846 return 0; 847 return 0;
847 } 848 }
848 dev_put(dev); 849 rcu_read_unlock();
849 } else if (type == IPV6_ADDR_MAPPED) { 850 } else if (type == IPV6_ADDR_MAPPED) {
850 if (!opt->v4mapped) 851 if (!opt->v4mapped)
851 return 0; 852 return 0;
@@ -873,10 +874,12 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
873 if (type & IPV6_ADDR_LINKLOCAL) { 874 if (type & IPV6_ADDR_LINKLOCAL) {
874 if (!addr->v6.sin6_scope_id) 875 if (!addr->v6.sin6_scope_id)
875 return 0; 876 return 0;
876 dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); 877 rcu_read_lock();
878 dev = dev_get_by_index_rcu(&init_net,
879 addr->v6.sin6_scope_id);
880 rcu_read_unlock();
877 if (!dev) 881 if (!dev)
878 return 0; 882 return 0;
879 dev_put(dev);
880 } 883 }
881 af = opt->pf->af; 884 af = opt->pf->af;
882 } 885 }
@@ -930,7 +933,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
930 .protocol = IPPROTO_SCTP, 933 .protocol = IPPROTO_SCTP,
931 .prot = &sctpv6_prot, 934 .prot = &sctpv6_prot,
932 .ops = &inet6_seqpacket_ops, 935 .ops = &inet6_seqpacket_ops,
933 .capability = -1,
934 .no_check = 0, 936 .no_check = 0,
935 .flags = SCTP_PROTOSW_FLAG 937 .flags = SCTP_PROTOSW_FLAG
936}; 938};
@@ -939,7 +941,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
939 .protocol = IPPROTO_SCTP, 941 .protocol = IPPROTO_SCTP,
940 .prot = &sctpv6_prot, 942 .prot = &sctpv6_prot,
941 .ops = &inet6_seqpacket_ops, 943 .ops = &inet6_seqpacket_ops,
942 .capability = -1,
943 .no_check = 0, 944 .no_check = 0,
944 .flags = SCTP_PROTOSW_FLAG, 945 .flags = SCTP_PROTOSW_FLAG,
945}; 946};
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 5cbda8f1ddfd..7c5589363433 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -429,23 +429,22 @@ int sctp_packet_transmit(struct sctp_packet *packet)
429 list_del_init(&chunk->list); 429 list_del_init(&chunk->list);
430 if (sctp_chunk_is_data(chunk)) { 430 if (sctp_chunk_is_data(chunk)) {
431 431
432 if (!chunk->has_tsn) { 432 if (!chunk->resent) {
433 sctp_chunk_assign_ssn(chunk); 433
434 sctp_chunk_assign_tsn(chunk); 434 /* 6.3.1 C4) When data is in flight and when allowed
435 435 * by rule C5, a new RTT measurement MUST be made each
436 /* 6.3.1 C4) When data is in flight and when allowed 436 * round trip. Furthermore, new RTT measurements
437 * by rule C5, a new RTT measurement MUST be made each 437 * SHOULD be made no more than once per round-trip
438 * round trip. Furthermore, new RTT measurements 438 * for a given destination transport address.
439 * SHOULD be made no more than once per round-trip 439 */
440 * for a given destination transport address.
441 */
442 440
443 if (!tp->rto_pending) { 441 if (!tp->rto_pending) {
444 chunk->rtt_in_progress = 1; 442 chunk->rtt_in_progress = 1;
445 tp->rto_pending = 1; 443 tp->rto_pending = 1;
446 } 444 }
447 } else 445 }
448 chunk->resent = 1; 446
447 chunk->resent = 1;
449 448
450 has_data = 1; 449 has_data = 1;
451 } 450 }
@@ -557,8 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
557 struct timer_list *timer; 556 struct timer_list *timer;
558 unsigned long timeout; 557 unsigned long timeout;
559 558
560 tp->last_time_used = jiffies;
561
562 /* Restart the AUTOCLOSE timer when sending data. */ 559 /* Restart the AUTOCLOSE timer when sending data. */
563 if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { 560 if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
564 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; 561 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
@@ -617,7 +614,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
617 sctp_xmit_t retval = SCTP_XMIT_OK; 614 sctp_xmit_t retval = SCTP_XMIT_OK;
618 size_t datasize, rwnd, inflight, flight_size; 615 size_t datasize, rwnd, inflight, flight_size;
619 struct sctp_transport *transport = packet->transport; 616 struct sctp_transport *transport = packet->transport;
620 __u32 max_burst_bytes;
621 struct sctp_association *asoc = transport->asoc; 617 struct sctp_association *asoc = transport->asoc;
622 struct sctp_outq *q = &asoc->outqueue; 618 struct sctp_outq *q = &asoc->outqueue;
623 619
@@ -650,28 +646,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
650 } 646 }
651 } 647 }
652 648
653 /* sctpimpguide-05 2.14.2
654 * D) When the time comes for the sender to
655 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
656 * first be applied to limit how many new DATA chunks may be sent.
657 * The limit is applied by adjusting cwnd as follows:
658 * if ((flightsize + Max.Burst * MTU) < cwnd)
659 * cwnd = flightsize + Max.Burst * MTU
660 */
661 max_burst_bytes = asoc->max_burst * asoc->pathmtu;
662 if ((flight_size + max_burst_bytes) < transport->cwnd) {
663 transport->cwnd = flight_size + max_burst_bytes;
664 SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
665 "transport: %p, cwnd: %d, "
666 "ssthresh: %d, flight_size: %d, "
667 "pba: %d\n",
668 __func__, transport,
669 transport->cwnd,
670 transport->ssthresh,
671 transport->flight_size,
672 transport->partial_bytes_acked);
673 }
674
675 /* RFC 2960 6.1 Transmission of DATA Chunks 649 /* RFC 2960 6.1 Transmission of DATA Chunks
676 * 650 *
677 * B) At any given time, the sender MUST NOT transmit new data 651 * B) At any given time, the sender MUST NOT transmit new data
@@ -747,6 +721,8 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
747 /* Has been accepted for transmission. */ 721 /* Has been accepted for transmission. */
748 if (!asoc->peer.prsctp_capable) 722 if (!asoc->peer.prsctp_capable)
749 chunk->msg->can_abandon = 0; 723 chunk->msg->can_abandon = 0;
724 sctp_chunk_assign_tsn(chunk);
725 sctp_chunk_assign_ssn(chunk);
750} 726}
751 727
752static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet, 728static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c9f20e28521b..229690f02a1d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -191,8 +191,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
191 __u32 tsn) 191 __u32 tsn)
192{ 192{
193 if (primary->cacc.changeover_active && 193 if (primary->cacc.changeover_active &&
194 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) 194 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
195 || sctp_cacc_skip_3_2(primary, tsn))) 195 sctp_cacc_skip_3_2(primary, tsn)))
196 return 1; 196 return 1;
197 return 0; 197 return 0;
198} 198}
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
423 if ((reason == SCTP_RTXR_FAST_RTX && 423 if ((reason == SCTP_RTXR_FAST_RTX &&
424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
426 /* If this chunk was sent less then 1 rto ago, do not
427 * retransmit this chunk, but give the peer time
428 * to acknowlege it. Do this only when
429 * retransmitting due to T3 timeout.
430 */
431 if (reason == SCTP_RTXR_T3_RTX &&
432 time_before(jiffies, chunk->sent_at +
433 transport->last_rto))
434 continue;
435
436 /* RFC 2960 6.2.1 Processing a Received SACK 426 /* RFC 2960 6.2.1 Processing a Received SACK
437 * 427 *
438 * C) Any time a DATA chunk is marked for 428 * C) Any time a DATA chunk is marked for
@@ -931,6 +921,14 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
931 goto sctp_flush_out; 921 goto sctp_flush_out;
932 } 922 }
933 923
924 /* Apply Max.Burst limitation to the current transport in
925 * case it will be used for new data. We are going to
926 * rest it before we return, but we want to apply the limit
927 * to the currently queued data.
928 */
929 if (transport)
930 sctp_transport_burst_limited(transport);
931
934 /* Finally, transmit new packets. */ 932 /* Finally, transmit new packets. */
935 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 933 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
936 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 934 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
@@ -976,6 +974,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
976 packet = &transport->packet; 974 packet = &transport->packet;
977 sctp_packet_config(packet, vtag, 975 sctp_packet_config(packet, vtag,
978 asoc->peer.ecn_capable); 976 asoc->peer.ecn_capable);
977 /* We've switched transports, so apply the
978 * Burst limit to the new transport.
979 */
980 sctp_transport_burst_limited(transport);
979 } 981 }
980 982
981 SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", 983 SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
@@ -1011,6 +1013,13 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
1011 break; 1013 break;
1012 1014
1013 case SCTP_XMIT_OK: 1015 case SCTP_XMIT_OK:
1016 /* The sender is in the SHUTDOWN-PENDING state,
1017 * The sender MAY set the I-bit in the DATA
1018 * chunk header.
1019 */
1020 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1021 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1022
1014 break; 1023 break;
1015 1024
1016 default: 1025 default:
@@ -1063,6 +1072,9 @@ sctp_flush_out:
1063 packet = &t->packet; 1072 packet = &t->packet;
1064 if (!sctp_packet_empty(packet)) 1073 if (!sctp_packet_empty(packet))
1065 error = sctp_packet_transmit(packet); 1074 error = sctp_packet_transmit(packet);
1075
1076 /* Clear the burst limited state, if any */
1077 sctp_transport_burst_reset(t);
1066 } 1078 }
1067 1079
1068 return error; 1080 return error;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d9f4cc2c7869..a3c8988758b1 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -205,14 +205,14 @@ static void sctp_get_local_addr_list(void)
205 struct list_head *pos; 205 struct list_head *pos;
206 struct sctp_af *af; 206 struct sctp_af *af;
207 207
208 read_lock(&dev_base_lock); 208 rcu_read_lock();
209 for_each_netdev(&init_net, dev) { 209 for_each_netdev_rcu(&init_net, dev) {
210 __list_for_each(pos, &sctp_address_families) { 210 __list_for_each(pos, &sctp_address_families) {
211 af = list_entry(pos, struct sctp_af, list); 211 af = list_entry(pos, struct sctp_af, list);
212 af->copy_addrlist(&sctp_local_addr_list, dev); 212 af->copy_addrlist(&sctp_local_addr_list, dev);
213 } 213 }
214 } 214 }
215 read_unlock(&dev_base_lock); 215 rcu_read_unlock();
216} 216}
217 217
218/* Free the existing local addresses. */ 218/* Free the existing local addresses. */
@@ -909,7 +909,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
909 .protocol = IPPROTO_SCTP, 909 .protocol = IPPROTO_SCTP,
910 .prot = &sctp_prot, 910 .prot = &sctp_prot,
911 .ops = &inet_seqpacket_ops, 911 .ops = &inet_seqpacket_ops,
912 .capability = -1,
913 .no_check = 0, 912 .no_check = 0,
914 .flags = SCTP_PROTOSW_FLAG 913 .flags = SCTP_PROTOSW_FLAG
915}; 914};
@@ -918,7 +917,6 @@ static struct inet_protosw sctp_stream_protosw = {
918 .protocol = IPPROTO_SCTP, 917 .protocol = IPPROTO_SCTP,
919 .prot = &sctp_prot, 918 .prot = &sctp_prot,
920 .ops = &inet_seqpacket_ops, 919 .ops = &inet_seqpacket_ops,
921 .capability = -1,
922 .no_check = 0, 920 .no_check = 0,
923 .flags = SCTP_PROTOSW_FLAG 921 .flags = SCTP_PROTOSW_FLAG
924}; 922};
@@ -1260,6 +1258,9 @@ SCTP_STATIC __init int sctp_init(void)
1260 /* Set SCOPE policy to enabled */ 1258 /* Set SCOPE policy to enabled */
1261 sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE; 1259 sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
1262 1260
1261 /* Set the default rwnd update threshold */
1262 sctp_rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
1263
1263 sctp_sysctl_register(); 1264 sctp_sysctl_register();
1264 1265
1265 INIT_LIST_HEAD(&sctp_address_families); 1266 INIT_LIST_HEAD(&sctp_address_families);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9d881a61ac02..9e732916b671 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -987,7 +987,10 @@ static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
987 987
988 target = skb_put(chunk->skb, len); 988 target = skb_put(chunk->skb, len);
989 989
990 memcpy(target, data, len); 990 if (data)
991 memcpy(target, data, len);
992 else
993 memset(target, 0, len);
991 994
992 /* Adjust the chunk length field. */ 995 /* Adjust the chunk length field. */
993 chunk->chunk_hdr->length = htons(chunklen + len); 996 chunk->chunk_hdr->length = htons(chunklen + len);
@@ -1129,16 +1132,18 @@ nodata:
1129struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, 1132struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
1130 const struct sctp_chunk *chunk, 1133 const struct sctp_chunk *chunk,
1131 __be16 cause_code, const void *payload, 1134 __be16 cause_code, const void *payload,
1132 size_t paylen) 1135 size_t paylen, size_t reserve_tail)
1133{ 1136{
1134 struct sctp_chunk *retval; 1137 struct sctp_chunk *retval;
1135 1138
1136 retval = sctp_make_op_error_space(asoc, chunk, paylen); 1139 retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
1137 if (!retval) 1140 if (!retval)
1138 goto nodata; 1141 goto nodata;
1139 1142
1140 sctp_init_cause(retval, cause_code, paylen); 1143 sctp_init_cause(retval, cause_code, paylen + reserve_tail);
1141 sctp_addto_chunk(retval, paylen, payload); 1144 sctp_addto_chunk(retval, paylen, payload);
1145 if (reserve_tail)
1146 sctp_addto_param(retval, reserve_tail, NULL);
1142 1147
1143nodata: 1148nodata:
1144 return retval; 1149 return retval;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8674d4919556..d771cc1b777a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -217,8 +217,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
217 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 217 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
218 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); 218 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
219 } else { 219 } else {
220 if (asoc->a_rwnd > asoc->rwnd) 220 asoc->a_rwnd = asoc->rwnd;
221 asoc->a_rwnd = asoc->rwnd;
222 sack = sctp_make_sack(asoc); 221 sack = sctp_make_sack(asoc);
223 if (!sack) 222 if (!sack)
224 goto nomem; 223 goto nomem;
@@ -480,7 +479,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
480 * that indicates that we have an outstanding HB. 479 * that indicates that we have an outstanding HB.
481 */ 480 */
482 if (!is_hb || transport->hb_sent) { 481 if (!is_hb || transport->hb_sent) {
483 transport->last_rto = transport->rto;
484 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 482 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
485 } 483 }
486} 484}
@@ -1418,6 +1416,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1418 asoc->init_last_sent_to = t; 1416 asoc->init_last_sent_to = t;
1419 chunk->transport = t; 1417 chunk->transport = t;
1420 t->init_sent_count++; 1418 t->init_sent_count++;
1419 /* Set the new transport as primary */
1420 sctp_assoc_set_primary(asoc, t);
1421 break; 1421 break;
1422 1422
1423 case SCTP_CMD_INIT_RESTART: 1423 case SCTP_CMD_INIT_RESTART:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c8fae1983dd1..1ef9de9bbae9 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
384 if (!new_asoc) 384 if (!new_asoc)
385 goto nomem; 385 goto nomem;
386 386
387 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
388 sctp_scope(sctp_source(chunk)),
389 GFP_ATOMIC) < 0)
390 goto nomem_init;
391
387 /* The call, sctp_process_init(), can fail on memory allocation. */ 392 /* The call, sctp_process_init(), can fail on memory allocation. */
388 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 393 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
389 sctp_source(chunk), 394 sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
401 len = ntohs(err_chunk->chunk_hdr->length) - 406 len = ntohs(err_chunk->chunk_hdr->length) -
402 sizeof(sctp_chunkhdr_t); 407 sizeof(sctp_chunkhdr_t);
403 408
404 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
405 goto nomem_init;
406
407 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 409 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
408 if (!repl) 410 if (!repl)
409 goto nomem_init; 411 goto nomem_init;
@@ -994,14 +996,15 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
994 sctp_sf_heartbeat(ep, asoc, type, arg, 996 sctp_sf_heartbeat(ep, asoc, type, arg,
995 commands)) 997 commands))
996 return SCTP_DISPOSITION_NOMEM; 998 return SCTP_DISPOSITION_NOMEM;
999
997 /* Set transport error counter and association error counter 1000 /* Set transport error counter and association error counter
998 * when sending heartbeat. 1001 * when sending heartbeat.
999 */ 1002 */
1000 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
1001 SCTP_TRANSPORT(transport));
1002 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT, 1003 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
1003 SCTP_TRANSPORT(transport)); 1004 SCTP_TRANSPORT(transport));
1004 } 1005 }
1006 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
1007 SCTP_TRANSPORT(transport));
1005 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE, 1008 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
1006 SCTP_TRANSPORT(transport)); 1009 SCTP_TRANSPORT(transport));
1007 1010
@@ -1452,6 +1455,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1452 if (!new_asoc) 1455 if (!new_asoc)
1453 goto nomem; 1456 goto nomem;
1454 1457
1458 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
1459 sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
1460 goto nomem;
1461
1455 /* In the outbound INIT ACK the endpoint MUST copy its current 1462 /* In the outbound INIT ACK the endpoint MUST copy its current
1456 * Verification Tag and Peers Verification tag into a reserved 1463 * Verification Tag and Peers Verification tag into a reserved
1457 * place (local tie-tag and per tie-tag) within the state cookie. 1464 * place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1495,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1488 sizeof(sctp_chunkhdr_t); 1495 sizeof(sctp_chunkhdr_t);
1489 } 1496 }
1490 1497
1491 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
1492 goto nomem;
1493
1494 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 1498 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
1495 if (!repl) 1499 if (!repl)
1496 goto nomem; 1500 goto nomem;
@@ -1717,7 +1721,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1717 1721
1718 err = sctp_make_op_error(asoc, chunk, 1722 err = sctp_make_op_error(asoc, chunk,
1719 SCTP_ERROR_COOKIE_IN_SHUTDOWN, 1723 SCTP_ERROR_COOKIE_IN_SHUTDOWN,
1720 NULL, 0); 1724 NULL, 0, 0);
1721 if (err) 1725 if (err)
1722 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1726 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1723 SCTP_CHUNK(err)); 1727 SCTP_CHUNK(err));
@@ -2865,6 +2869,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2865 sctp_cmd_seq_t *commands) 2869 sctp_cmd_seq_t *commands)
2866{ 2870{
2867 struct sctp_chunk *chunk = arg; 2871 struct sctp_chunk *chunk = arg;
2872 sctp_arg_t force = SCTP_NOFORCE();
2868 int error; 2873 int error;
2869 2874
2870 if (!sctp_vtag_verify(chunk, asoc)) { 2875 if (!sctp_vtag_verify(chunk, asoc)) {
@@ -2898,6 +2903,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2898 BUG(); 2903 BUG();
2899 } 2904 }
2900 2905
2906 if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
2907 force = SCTP_FORCE();
2908
2901 if (asoc->autoclose) { 2909 if (asoc->autoclose) {
2902 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 2910 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
2903 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 2911 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2926,7 +2934,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2926 * more aggressive than the following algorithms allow. 2934 * more aggressive than the following algorithms allow.
2927 */ 2935 */
2928 if (chunk->end_of_packet) 2936 if (chunk->end_of_packet)
2929 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); 2937 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
2930 2938
2931 return SCTP_DISPOSITION_CONSUME; 2939 return SCTP_DISPOSITION_CONSUME;
2932 2940
@@ -2951,7 +2959,7 @@ discard_force:
2951 2959
2952discard_noforce: 2960discard_noforce:
2953 if (chunk->end_of_packet) 2961 if (chunk->end_of_packet)
2954 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); 2962 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
2955 2963
2956 return SCTP_DISPOSITION_DISCARD; 2964 return SCTP_DISPOSITION_DISCARD;
2957consume: 2965consume:
@@ -3970,7 +3978,7 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
3970 err_chunk = sctp_make_op_error(asoc, chunk, 3978 err_chunk = sctp_make_op_error(asoc, chunk,
3971 SCTP_ERROR_UNSUP_HMAC, 3979 SCTP_ERROR_UNSUP_HMAC,
3972 &auth_hdr->hmac_id, 3980 &auth_hdr->hmac_id,
3973 sizeof(__u16)); 3981 sizeof(__u16), 0);
3974 if (err_chunk) { 3982 if (err_chunk) {
3975 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 3983 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
3976 SCTP_CHUNK(err_chunk)); 3984 SCTP_CHUNK(err_chunk));
@@ -4062,7 +4070,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
4062 hdr = unk_chunk->chunk_hdr; 4070 hdr = unk_chunk->chunk_hdr;
4063 err_chunk = sctp_make_op_error(asoc, unk_chunk, 4071 err_chunk = sctp_make_op_error(asoc, unk_chunk,
4064 SCTP_ERROR_UNKNOWN_CHUNK, hdr, 4072 SCTP_ERROR_UNKNOWN_CHUNK, hdr,
4065 WORD_ROUND(ntohs(hdr->length))); 4073 WORD_ROUND(ntohs(hdr->length)),
4074 0);
4066 if (err_chunk) { 4075 if (err_chunk) {
4067 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 4076 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4068 SCTP_CHUNK(err_chunk)); 4077 SCTP_CHUNK(err_chunk));
@@ -4081,7 +4090,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
4081 hdr = unk_chunk->chunk_hdr; 4090 hdr = unk_chunk->chunk_hdr;
4082 err_chunk = sctp_make_op_error(asoc, unk_chunk, 4091 err_chunk = sctp_make_op_error(asoc, unk_chunk,
4083 SCTP_ERROR_UNKNOWN_CHUNK, hdr, 4092 SCTP_ERROR_UNKNOWN_CHUNK, hdr,
4084 WORD_ROUND(ntohs(hdr->length))); 4093 WORD_ROUND(ntohs(hdr->length)),
4094 0);
4085 if (err_chunk) { 4095 if (err_chunk) {
4086 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 4096 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4087 SCTP_CHUNK(err_chunk)); 4097 SCTP_CHUNK(err_chunk));
@@ -6045,7 +6055,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6045 6055
6046 err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, 6056 err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
6047 &data_hdr->stream, 6057 &data_hdr->stream,
6048 sizeof(data_hdr->stream)); 6058 sizeof(data_hdr->stream),
6059 sizeof(u16));
6049 if (err) 6060 if (err)
6050 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 6061 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
6051 SCTP_CHUNK(err)); 6062 SCTP_CHUNK(err));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4085db99033d..89ab66e54740 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
1080 err = -ENOMEM; 1080 err = -ENOMEM;
1081 goto out_free; 1081 goto out_free;
1082 } 1082 }
1083
1084 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1085 GFP_KERNEL);
1086 if (err < 0) {
1087 goto out_free;
1088 }
1089
1083 } 1090 }
1084 1091
1085 /* Prime the peer's transport structures. */ 1092 /* Prime the peer's transport structures. */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
1095 walk_size += af->sockaddr_len; 1102 walk_size += af->sockaddr_len;
1096 } 1103 }
1097 1104
1098 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1099 if (err < 0) {
1100 goto out_free;
1101 }
1102
1103 /* In case the user of sctp_connectx() wants an association 1105 /* In case the user of sctp_connectx() wants an association
1104 * id back, assign one now. 1106 * id back, assign one now.
1105 */ 1107 */
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1274} 1276}
1275 1277
1276/* 1278/*
1277 * New (hopefully final) interface for the API. The option buffer is used 1279 * New (hopefully final) interface for the API.
1278 * both for the returned association id and the addresses. 1280 * We use the sctp_getaddrs_old structure so that use-space library
1281 * can avoid any unnecessary allocations. The only defferent part
1282 * is that we store the actual length of the address buffer into the
1283 * addrs_num structure member. That way we can re-use the existing
1284 * code.
1279 */ 1285 */
1280SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, 1286SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
1281 char __user *optval, 1287 char __user *optval,
1282 int __user *optlen) 1288 int __user *optlen)
1283{ 1289{
1290 struct sctp_getaddrs_old param;
1284 sctp_assoc_t assoc_id = 0; 1291 sctp_assoc_t assoc_id = 0;
1285 int err = 0; 1292 int err = 0;
1286 1293
1287 if (len < sizeof(assoc_id)) 1294 if (len < sizeof(param))
1288 return -EINVAL; 1295 return -EINVAL;
1289 1296
1297 if (copy_from_user(&param, optval, sizeof(param)))
1298 return -EFAULT;
1299
1290 err = __sctp_setsockopt_connectx(sk, 1300 err = __sctp_setsockopt_connectx(sk,
1291 (struct sockaddr __user *)(optval + sizeof(assoc_id)), 1301 (struct sockaddr __user *)param.addrs,
1292 len - sizeof(assoc_id), &assoc_id); 1302 param.addr_num, &assoc_id);
1293 1303
1294 if (err == 0 || err == -EINPROGRESS) { 1304 if (err == 0 || err == -EINPROGRESS) {
1295 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1305 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1689 goto out_unlock; 1699 goto out_unlock;
1690 } 1700 }
1691 asoc = new_asoc; 1701 asoc = new_asoc;
1702 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1703 if (err < 0) {
1704 err = -ENOMEM;
1705 goto out_free;
1706 }
1692 1707
1693 /* If the SCTP_INIT ancillary data is specified, set all 1708 /* If the SCTP_INIT ancillary data is specified, set all
1694 * the association init values accordingly. 1709 * the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1718 err = -ENOMEM; 1733 err = -ENOMEM;
1719 goto out_free; 1734 goto out_free;
1720 } 1735 }
1721 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1722 if (err < 0) {
1723 err = -ENOMEM;
1724 goto out_free;
1725 }
1726 } 1736 }
1727 1737
1728 /* ASSERT: we have a valid association at this point. */ 1738 /* ASSERT: we have a valid association at this point. */
@@ -2076,6 +2086,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2076 return -EINVAL; 2086 return -EINVAL;
2077 if (copy_from_user(&sp->autoclose, optval, optlen)) 2087 if (copy_from_user(&sp->autoclose, optval, optlen))
2078 return -EFAULT; 2088 return -EFAULT;
2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2090 if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) )
2091 sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
2079 2092
2080 return 0; 2093 return 0;
2081} 2094}
@@ -2301,11 +2314,10 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2301 } 2314 }
2302 } 2315 }
2303 2316
2304 /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value 2317 /* Note that a value of zero indicates the current setting should be
2305 * of this field is ignored. Note also that a value of zero 2318 left unchanged.
2306 * indicates the current setting should be left unchanged.
2307 */ 2319 */
2308 if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) { 2320 if (params->spp_pathmaxrxt) {
2309 if (trans) { 2321 if (trans) {
2310 trans->pathmaxrxt = params->spp_pathmaxrxt; 2322 trans->pathmaxrxt = params->spp_pathmaxrxt;
2311 } else if (asoc) { 2323 } else if (asoc) {
@@ -2344,8 +2356,8 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2344 pmtud_change == SPP_PMTUD || 2356 pmtud_change == SPP_PMTUD ||
2345 sackdelay_change == SPP_SACKDELAY || 2357 sackdelay_change == SPP_SACKDELAY ||
2346 params.spp_sackdelay > 500 || 2358 params.spp_sackdelay > 500 ||
2347 (params.spp_pathmtu 2359 (params.spp_pathmtu &&
2348 && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2360 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2349 return -EINVAL; 2361 return -EINVAL;
2350 2362
2351 /* If an address other than INADDR_ANY is specified, and 2363 /* If an address other than INADDR_ANY is specified, and
@@ -4339,90 +4351,6 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval
4339 return 0; 4351 return 0;
4340} 4352}
4341 4353
4342static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
4343 char __user *optval,
4344 int __user *optlen)
4345{
4346 sctp_assoc_t id;
4347 struct sctp_association *asoc;
4348 struct list_head *pos;
4349 int cnt = 0;
4350
4351 if (len < sizeof(sctp_assoc_t))
4352 return -EINVAL;
4353
4354 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4355 return -EFAULT;
4356
4357 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD "
4358 "socket option deprecated\n");
4359 /* For UDP-style sockets, id specifies the association to query. */
4360 asoc = sctp_id2assoc(sk, id);
4361 if (!asoc)
4362 return -EINVAL;
4363
4364 list_for_each(pos, &asoc->peer.transport_addr_list) {
4365 cnt ++;
4366 }
4367
4368 return cnt;
4369}
4370
4371/*
4372 * Old API for getting list of peer addresses. Does not work for 32-bit
4373 * programs running on a 64-bit kernel
4374 */
4375static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4376 char __user *optval,
4377 int __user *optlen)
4378{
4379 struct sctp_association *asoc;
4380 int cnt = 0;
4381 struct sctp_getaddrs_old getaddrs;
4382 struct sctp_transport *from;
4383 void __user *to;
4384 union sctp_addr temp;
4385 struct sctp_sock *sp = sctp_sk(sk);
4386 int addrlen;
4387
4388 if (len < sizeof(struct sctp_getaddrs_old))
4389 return -EINVAL;
4390
4391 len = sizeof(struct sctp_getaddrs_old);
4392
4393 if (copy_from_user(&getaddrs, optval, len))
4394 return -EFAULT;
4395
4396 if (getaddrs.addr_num <= 0) return -EINVAL;
4397
4398 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD "
4399 "socket option deprecated\n");
4400
4401 /* For UDP-style sockets, id specifies the association to query. */
4402 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4403 if (!asoc)
4404 return -EINVAL;
4405
4406 to = (void __user *)getaddrs.addrs;
4407 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4408 transports) {
4409 memcpy(&temp, &from->ipaddr, sizeof(temp));
4410 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4411 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
4412 if (copy_to_user(to, &temp, addrlen))
4413 return -EFAULT;
4414 to += addrlen ;
4415 cnt ++;
4416 if (cnt >= getaddrs.addr_num) break;
4417 }
4418 getaddrs.addr_num = cnt;
4419 if (put_user(len, optlen))
4420 return -EFAULT;
4421 if (copy_to_user(optval, &getaddrs, len))
4422 return -EFAULT;
4423
4424 return 0;
4425}
4426 4354
4427static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4355static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4428 char __user *optval, int __user *optlen) 4356 char __user *optval, int __user *optlen)
@@ -4475,125 +4403,6 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4475 return 0; 4403 return 0;
4476} 4404}
4477 4405
4478static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4479 char __user *optval,
4480 int __user *optlen)
4481{
4482 sctp_assoc_t id;
4483 struct sctp_bind_addr *bp;
4484 struct sctp_association *asoc;
4485 struct sctp_sockaddr_entry *addr;
4486 int cnt = 0;
4487
4488 if (len < sizeof(sctp_assoc_t))
4489 return -EINVAL;
4490
4491 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4492 return -EFAULT;
4493
4494 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD "
4495 "socket option deprecated\n");
4496
4497 /*
4498 * For UDP-style sockets, id specifies the association to query.
4499 * If the id field is set to the value '0' then the locally bound
4500 * addresses are returned without regard to any particular
4501 * association.
4502 */
4503 if (0 == id) {
4504 bp = &sctp_sk(sk)->ep->base.bind_addr;
4505 } else {
4506 asoc = sctp_id2assoc(sk, id);
4507 if (!asoc)
4508 return -EINVAL;
4509 bp = &asoc->base.bind_addr;
4510 }
4511
4512 /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
4513 * addresses from the global local address list.
4514 */
4515 if (sctp_list_single_entry(&bp->address_list)) {
4516 addr = list_entry(bp->address_list.next,
4517 struct sctp_sockaddr_entry, list);
4518 if (sctp_is_any(sk, &addr->a)) {
4519 rcu_read_lock();
4520 list_for_each_entry_rcu(addr,
4521 &sctp_local_addr_list, list) {
4522 if (!addr->valid)
4523 continue;
4524
4525 if ((PF_INET == sk->sk_family) &&
4526 (AF_INET6 == addr->a.sa.sa_family))
4527 continue;
4528
4529 if ((PF_INET6 == sk->sk_family) &&
4530 inet_v6_ipv6only(sk) &&
4531 (AF_INET == addr->a.sa.sa_family))
4532 continue;
4533
4534 cnt++;
4535 }
4536 rcu_read_unlock();
4537 } else {
4538 cnt = 1;
4539 }
4540 goto done;
4541 }
4542
4543 /* Protection on the bound address list is not needed,
4544 * since in the socket option context we hold the socket lock,
4545 * so there is no way that the bound address list can change.
4546 */
4547 list_for_each_entry(addr, &bp->address_list, list) {
4548 cnt ++;
4549 }
4550done:
4551 return cnt;
4552}
4553
4554/* Helper function that copies local addresses to user and returns the number
4555 * of addresses copied.
4556 */
4557static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
4558 int max_addrs, void *to,
4559 int *bytes_copied)
4560{
4561 struct sctp_sockaddr_entry *addr;
4562 union sctp_addr temp;
4563 int cnt = 0;
4564 int addrlen;
4565
4566 rcu_read_lock();
4567 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
4568 if (!addr->valid)
4569 continue;
4570
4571 if ((PF_INET == sk->sk_family) &&
4572 (AF_INET6 == addr->a.sa.sa_family))
4573 continue;
4574 if ((PF_INET6 == sk->sk_family) &&
4575 inet_v6_ipv6only(sk) &&
4576 (AF_INET == addr->a.sa.sa_family))
4577 continue;
4578 memcpy(&temp, &addr->a, sizeof(temp));
4579 if (!temp.v4.sin_port)
4580 temp.v4.sin_port = htons(port);
4581
4582 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4583 &temp);
4584 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4585 memcpy(to, &temp, addrlen);
4586
4587 to += addrlen;
4588 *bytes_copied += addrlen;
4589 cnt ++;
4590 if (cnt >= max_addrs) break;
4591 }
4592 rcu_read_unlock();
4593
4594 return cnt;
4595}
4596
4597static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4406static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4598 size_t space_left, int *bytes_copied) 4407 size_t space_left, int *bytes_copied)
4599{ 4408{
@@ -4637,112 +4446,6 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4637 return cnt; 4446 return cnt;
4638} 4447}
4639 4448
4640/* Old API for getting list of local addresses. Does not work for 32-bit
4641 * programs running on a 64-bit kernel
4642 */
4643static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4644 char __user *optval, int __user *optlen)
4645{
4646 struct sctp_bind_addr *bp;
4647 struct sctp_association *asoc;
4648 int cnt = 0;
4649 struct sctp_getaddrs_old getaddrs;
4650 struct sctp_sockaddr_entry *addr;
4651 void __user *to;
4652 union sctp_addr temp;
4653 struct sctp_sock *sp = sctp_sk(sk);
4654 int addrlen;
4655 int err = 0;
4656 void *addrs;
4657 void *buf;
4658 int bytes_copied = 0;
4659
4660 if (len < sizeof(struct sctp_getaddrs_old))
4661 return -EINVAL;
4662
4663 len = sizeof(struct sctp_getaddrs_old);
4664 if (copy_from_user(&getaddrs, optval, len))
4665 return -EFAULT;
4666
4667 if (getaddrs.addr_num <= 0 ||
4668 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
4669 return -EINVAL;
4670
4671 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD "
4672 "socket option deprecated\n");
4673
4674 /*
4675 * For UDP-style sockets, id specifies the association to query.
4676 * If the id field is set to the value '0' then the locally bound
4677 * addresses are returned without regard to any particular
4678 * association.
4679 */
4680 if (0 == getaddrs.assoc_id) {
4681 bp = &sctp_sk(sk)->ep->base.bind_addr;
4682 } else {
4683 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4684 if (!asoc)
4685 return -EINVAL;
4686 bp = &asoc->base.bind_addr;
4687 }
4688
4689 to = getaddrs.addrs;
4690
4691 /* Allocate space for a local instance of packed array to hold all
4692 * the data. We store addresses here first and then put write them
4693 * to the user in one shot.
4694 */
4695 addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
4696 GFP_KERNEL);
4697 if (!addrs)
4698 return -ENOMEM;
4699
4700 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4701 * addresses from the global local address list.
4702 */
4703 if (sctp_list_single_entry(&bp->address_list)) {
4704 addr = list_entry(bp->address_list.next,
4705 struct sctp_sockaddr_entry, list);
4706 if (sctp_is_any(sk, &addr->a)) {
4707 cnt = sctp_copy_laddrs_old(sk, bp->port,
4708 getaddrs.addr_num,
4709 addrs, &bytes_copied);
4710 goto copy_getaddrs;
4711 }
4712 }
4713
4714 buf = addrs;
4715 /* Protection on the bound address list is not needed since
4716 * in the socket option context we hold a socket lock and
4717 * thus the bound address list can't change.
4718 */
4719 list_for_each_entry(addr, &bp->address_list, list) {
4720 memcpy(&temp, &addr->a, sizeof(temp));
4721 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4722 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4723 memcpy(buf, &temp, addrlen);
4724 buf += addrlen;
4725 bytes_copied += addrlen;
4726 cnt ++;
4727 if (cnt >= getaddrs.addr_num) break;
4728 }
4729
4730copy_getaddrs:
4731 /* copy the entire address list into the user provided space */
4732 if (copy_to_user(to, addrs, bytes_copied)) {
4733 err = -EFAULT;
4734 goto error;
4735 }
4736
4737 /* copy the leading structure back to user */
4738 getaddrs.addr_num = cnt;
4739 if (copy_to_user(optval, &getaddrs, len))
4740 err = -EFAULT;
4741
4742error:
4743 kfree(addrs);
4744 return err;
4745}
4746 4449
4747static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4450static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4748 char __user *optval, int __user *optlen) 4451 char __user *optval, int __user *optlen)
@@ -5593,22 +5296,6 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5593 case SCTP_INITMSG: 5296 case SCTP_INITMSG:
5594 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5297 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
5595 break; 5298 break;
5596 case SCTP_GET_PEER_ADDRS_NUM_OLD:
5597 retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval,
5598 optlen);
5599 break;
5600 case SCTP_GET_LOCAL_ADDRS_NUM_OLD:
5601 retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval,
5602 optlen);
5603 break;
5604 case SCTP_GET_PEER_ADDRS_OLD:
5605 retval = sctp_getsockopt_peer_addrs_old(sk, len, optval,
5606 optlen);
5607 break;
5608 case SCTP_GET_LOCAL_ADDRS_OLD:
5609 retval = sctp_getsockopt_local_addrs_old(sk, len, optval,
5610 optlen);
5611 break;
5612 case SCTP_GET_PEER_ADDRS: 5299 case SCTP_GET_PEER_ADDRS:
5613 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5300 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
5614 optlen); 5301 optlen);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ab7151da120f..ae03ded2bf1a 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -52,6 +52,7 @@ static int int_max = INT_MAX;
52static int sack_timer_min = 1; 52static int sack_timer_min = 1;
53static int sack_timer_max = 500; 53static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16;
55 56
56extern int sysctl_sctp_mem[3]; 57extern int sysctl_sctp_mem[3];
57extern int sysctl_sctp_rmem[3]; 58extern int sysctl_sctp_rmem[3];
@@ -284,6 +285,18 @@ static ctl_table sctp_table[] = {
284 .extra1 = &zero, 285 .extra1 = &zero,
285 .extra2 = &addr_scope_max, 286 .extra2 = &addr_scope_max,
286 }, 287 },
288 {
289 .ctl_name = CTL_UNNUMBERED,
290 .procname = "rwnd_update_shift",
291 .data = &sctp_rwnd_upd_shift,
292 .maxlen = sizeof(int),
293 .mode = 0644,
294 .proc_handler = &proc_dointvec_minmax,
295 .strategy = &sysctl_intvec,
296 .extra1 = &one,
297 .extra2 = &rwnd_scale_max,
298 },
299
287 { .ctl_name = 0 } 300 { .ctl_name = 0 }
288}; 301};
289 302
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c256e4839316..b827d21dbe54 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
74 * given destination transport address, set RTO to the protocol 74 * given destination transport address, set RTO to the protocol
75 * parameter 'RTO.Initial'. 75 * parameter 'RTO.Initial'.
76 */ 76 */
77 peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial); 77 peer->rto = msecs_to_jiffies(sctp_rto_initial);
78 peer->rtt = 0; 78 peer->rtt = 0;
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
@@ -83,7 +83,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
83 peer->fast_recovery = 0; 83 peer->fast_recovery = 0;
84 84
85 peer->last_time_heard = jiffies; 85 peer->last_time_heard = jiffies;
86 peer->last_time_used = jiffies;
87 peer->last_time_ecne_reduced = jiffies; 86 peer->last_time_ecne_reduced = jiffies;
88 87
89 peer->init_sent_count = 0; 88 peer->init_sent_count = 0;
@@ -308,7 +307,8 @@ void sctp_transport_route(struct sctp_transport *transport,
308 /* Initialize sk->sk_rcv_saddr, if the transport is the 307 /* Initialize sk->sk_rcv_saddr, if the transport is the
309 * association's active path for getsockname(). 308 * association's active path for getsockname().
310 */ 309 */
311 if (asoc && (transport == asoc->peer.active_path)) 310 if (asoc && (!asoc->peer.primary_path ||
311 (transport == asoc->peer.active_path)))
312 opt->pf->af->to_sk_saddr(&transport->saddr, 312 opt->pf->af->to_sk_saddr(&transport->saddr,
313 asoc->base.sk); 313 asoc->base.sk);
314 } else 314 } else
@@ -385,7 +385,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
385 tp->rto = tp->asoc->rto_max; 385 tp->rto = tp->asoc->rto_max;
386 386
387 tp->rtt = rtt; 387 tp->rtt = rtt;
388 tp->last_rto = tp->rto;
389 388
390 /* Reset rto_pending so that a new RTT measurement is started when a 389 /* Reset rto_pending so that a new RTT measurement is started when a
391 * new data chunk is sent. 390 * new data chunk is sent.
@@ -564,10 +563,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
564 * to be done every RTO interval, we do it every hearbeat 563 * to be done every RTO interval, we do it every hearbeat
565 * interval. 564 * interval.
566 */ 565 */
567 if (time_after(jiffies, transport->last_time_used + 566 transport->cwnd = max(transport->cwnd/2,
568 transport->rto)) 567 4*transport->asoc->pathmtu);
569 transport->cwnd = max(transport->cwnd/2,
570 4*transport->asoc->pathmtu);
571 break; 568 break;
572 } 569 }
573 570
@@ -578,6 +575,43 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
578 transport->cwnd, transport->ssthresh); 575 transport->cwnd, transport->ssthresh);
579} 576}
580 577
578/* Apply Max.Burst limit to the congestion window:
579 * sctpimpguide-05 2.14.2
580 * D) When the time comes for the sender to
581 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
582 * first be applied to limit how many new DATA chunks may be sent.
583 * The limit is applied by adjusting cwnd as follows:
584 * if ((flightsize+ Max.Burst * MTU) < cwnd)
585 * cwnd = flightsize + Max.Burst * MTU
586 */
587
588void sctp_transport_burst_limited(struct sctp_transport *t)
589{
590 struct sctp_association *asoc = t->asoc;
591 u32 old_cwnd = t->cwnd;
592 u32 max_burst_bytes;
593
594 if (t->burst_limited)
595 return;
596
597 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
598 if (max_burst_bytes < old_cwnd) {
599 t->cwnd = max_burst_bytes;
600 t->burst_limited = old_cwnd;
601 }
602}
603
604/* Restore the old cwnd congestion window, after the burst had it's
605 * desired effect.
606 */
607void sctp_transport_burst_reset(struct sctp_transport *t)
608{
609 if (t->burst_limited) {
610 t->cwnd = t->burst_limited;
611 t->burst_limited = 0;
612 }
613}
614
581/* What is the next timeout value for this transport? */ 615/* What is the next timeout value for this transport? */
582unsigned long sctp_transport_timeout(struct sctp_transport *t) 616unsigned long sctp_transport_timeout(struct sctp_transport *t)
583{ 617{
@@ -600,8 +634,9 @@ void sctp_transport_reset(struct sctp_transport *t)
600 * (see Section 6.2.1) 634 * (see Section 6.2.1)
601 */ 635 */
602 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 636 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
637 t->burst_limited = 0;
603 t->ssthresh = asoc->peer.i.a_rwnd; 638 t->ssthresh = asoc->peer.i.a_rwnd;
604 t->last_rto = t->rto = asoc->rto_initial; 639 t->rto = asoc->rto_initial;
605 t->rtt = 0; 640 t->rtt = 0;
606 t->srtt = 0; 641 t->srtt = 0;
607 t->rttvar = 0; 642 t->rttvar = 0;
diff --git a/net/socket.c b/net/socket.c
index 9dff31c9b799..b94c3dd71015 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -97,6 +97,12 @@
97#include <net/sock.h> 97#include <net/sock.h>
98#include <linux/netfilter.h> 98#include <linux/netfilter.h>
99 99
100#include <linux/if_tun.h>
101#include <linux/ipv6_route.h>
102#include <linux/route.h>
103#include <linux/sockios.h>
104#include <linux/atalk.h>
105
100static int sock_no_open(struct inode *irrelevant, struct file *dontcare); 106static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
101static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, 107static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
102 unsigned long nr_segs, loff_t pos); 108 unsigned long nr_segs, loff_t pos);
@@ -919,6 +925,24 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
919 925
920EXPORT_SYMBOL(dlci_ioctl_set); 926EXPORT_SYMBOL(dlci_ioctl_set);
921 927
928static long sock_do_ioctl(struct net *net, struct socket *sock,
929 unsigned int cmd, unsigned long arg)
930{
931 int err;
932 void __user *argp = (void __user *)arg;
933
934 err = sock->ops->ioctl(sock, cmd, arg);
935
936 /*
937 * If this ioctl is unknown try to hand it down
938 * to the NIC driver.
939 */
940 if (err == -ENOIOCTLCMD)
941 err = dev_ioctl(net, cmd, argp);
942
943 return err;
944}
945
922/* 946/*
923 * With an ioctl, arg may well be a user mode pointer, but we don't know 947 * With an ioctl, arg may well be a user mode pointer, but we don't know
924 * what to do with it - that's up to the protocol still. 948 * what to do with it - that's up to the protocol still.
@@ -992,14 +1016,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
992 mutex_unlock(&dlci_ioctl_mutex); 1016 mutex_unlock(&dlci_ioctl_mutex);
993 break; 1017 break;
994 default: 1018 default:
995 err = sock->ops->ioctl(sock, cmd, arg); 1019 err = sock_do_ioctl(net, sock, cmd, arg);
996
997 /*
998 * If this ioctl is unknown try to hand it down
999 * to the NIC driver.
1000 */
1001 if (err == -ENOIOCTLCMD)
1002 err = dev_ioctl(net, cmd, argp);
1003 break; 1020 break;
1004 } 1021 }
1005 return err; 1022 return err;
@@ -1252,7 +1269,7 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
1252 /* Now protected by module ref count */ 1269 /* Now protected by module ref count */
1253 rcu_read_unlock(); 1270 rcu_read_unlock();
1254 1271
1255 err = pf->create(net, sock, protocol); 1272 err = pf->create(net, sock, protocol, kern);
1256 if (err < 0) 1273 if (err < 0)
1257 goto out_module_put; 1274 goto out_module_put;
1258 1275
@@ -2127,6 +2144,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2127 int fput_needed, err, datagrams; 2144 int fput_needed, err, datagrams;
2128 struct socket *sock; 2145 struct socket *sock;
2129 struct mmsghdr __user *entry; 2146 struct mmsghdr __user *entry;
2147 struct compat_mmsghdr __user *compat_entry;
2130 struct msghdr msg_sys; 2148 struct msghdr msg_sys;
2131 struct timespec end_time; 2149 struct timespec end_time;
2132 2150
@@ -2146,19 +2164,30 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2146 goto out_put; 2164 goto out_put;
2147 2165
2148 entry = mmsg; 2166 entry = mmsg;
2167 compat_entry = (struct compat_mmsghdr __user *)mmsg;
2149 2168
2150 while (datagrams < vlen) { 2169 while (datagrams < vlen) {
2151 /* 2170 /*
2152 * No need to ask LSM for more than the first datagram. 2171 * No need to ask LSM for more than the first datagram.
2153 */ 2172 */
2154 err = __sys_recvmsg(sock, (struct msghdr __user *)entry, 2173 if (MSG_CMSG_COMPAT & flags) {
2155 &msg_sys, flags, datagrams); 2174 err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
2156 if (err < 0) 2175 &msg_sys, flags, datagrams);
2157 break; 2176 if (err < 0)
2158 err = put_user(err, &entry->msg_len); 2177 break;
2178 err = __put_user(err, &compat_entry->msg_len);
2179 ++compat_entry;
2180 } else {
2181 err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
2182 &msg_sys, flags, datagrams);
2183 if (err < 0)
2184 break;
2185 err = put_user(err, &entry->msg_len);
2186 ++entry;
2187 }
2188
2159 if (err) 2189 if (err)
2160 break; 2190 break;
2161 ++entry;
2162 ++datagrams; 2191 ++datagrams;
2163 2192
2164 if (timeout) { 2193 if (timeout) {
@@ -2459,6 +2488,552 @@ void socket_seq_show(struct seq_file *seq)
2459#endif /* CONFIG_PROC_FS */ 2488#endif /* CONFIG_PROC_FS */
2460 2489
2461#ifdef CONFIG_COMPAT 2490#ifdef CONFIG_COMPAT
2491static int do_siocgstamp(struct net *net, struct socket *sock,
2492 unsigned int cmd, struct compat_timeval __user *up)
2493{
2494 mm_segment_t old_fs = get_fs();
2495 struct timeval ktv;
2496 int err;
2497
2498 set_fs(KERNEL_DS);
2499 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2500 set_fs(old_fs);
2501 if (!err) {
2502 err = put_user(ktv.tv_sec, &up->tv_sec);
2503 err |= __put_user(ktv.tv_usec, &up->tv_usec);
2504 }
2505 return err;
2506}
2507
2508static int do_siocgstampns(struct net *net, struct socket *sock,
2509 unsigned int cmd, struct compat_timespec __user *up)
2510{
2511 mm_segment_t old_fs = get_fs();
2512 struct timespec kts;
2513 int err;
2514
2515 set_fs(KERNEL_DS);
2516 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2517 set_fs(old_fs);
2518 if (!err) {
2519 err = put_user(kts.tv_sec, &up->tv_sec);
2520 err |= __put_user(kts.tv_nsec, &up->tv_nsec);
2521 }
2522 return err;
2523}
2524
2525static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
2526{
2527 struct ifreq __user *uifr;
2528 int err;
2529
2530 uifr = compat_alloc_user_space(sizeof(struct ifreq));
2531 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
2532 return -EFAULT;
2533
2534 err = dev_ioctl(net, SIOCGIFNAME, uifr);
2535 if (err)
2536 return err;
2537
2538 if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
2539 return -EFAULT;
2540
2541 return 0;
2542}
2543
2544static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2545{
2546 struct compat_ifconf ifc32;
2547 struct ifconf ifc;
2548 struct ifconf __user *uifc;
2549 struct compat_ifreq __user *ifr32;
2550 struct ifreq __user *ifr;
2551 unsigned int i, j;
2552 int err;
2553
2554 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
2555 return -EFAULT;
2556
2557 if (ifc32.ifcbuf == 0) {
2558 ifc32.ifc_len = 0;
2559 ifc.ifc_len = 0;
2560 ifc.ifc_req = NULL;
2561 uifc = compat_alloc_user_space(sizeof(struct ifconf));
2562 } else {
2563 size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) *
2564 sizeof (struct ifreq);
2565 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
2566 ifc.ifc_len = len;
2567 ifr = ifc.ifc_req = (void __user *)(uifc + 1);
2568 ifr32 = compat_ptr(ifc32.ifcbuf);
2569 for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) {
2570 if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
2571 return -EFAULT;
2572 ifr++;
2573 ifr32++;
2574 }
2575 }
2576 if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
2577 return -EFAULT;
2578
2579 err = dev_ioctl(net, SIOCGIFCONF, uifc);
2580 if (err)
2581 return err;
2582
2583 if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
2584 return -EFAULT;
2585
2586 ifr = ifc.ifc_req;
2587 ifr32 = compat_ptr(ifc32.ifcbuf);
2588 for (i = 0, j = 0;
2589 i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
2590 i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) {
2591 if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq)))
2592 return -EFAULT;
2593 ifr32++;
2594 ifr++;
2595 }
2596
2597 if (ifc32.ifcbuf == 0) {
2598 /* Translate from 64-bit structure multiple to
2599 * a 32-bit one.
2600 */
2601 i = ifc.ifc_len;
2602 i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq));
2603 ifc32.ifc_len = i;
2604 } else {
2605 ifc32.ifc_len = i;
2606 }
2607 if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf)))
2608 return -EFAULT;
2609
2610 return 0;
2611}
2612
2613static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2614{
2615 struct ifreq __user *ifr;
2616 u32 data;
2617 void __user *datap;
2618
2619 ifr = compat_alloc_user_space(sizeof(*ifr));
2620
2621 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2622 return -EFAULT;
2623
2624 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
2625 return -EFAULT;
2626
2627 datap = compat_ptr(data);
2628 if (put_user(datap, &ifr->ifr_ifru.ifru_data))
2629 return -EFAULT;
2630
2631 return dev_ioctl(net, SIOCETHTOOL, ifr);
2632}
2633
2634static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
2635{
2636 void __user *uptr;
2637 compat_uptr_t uptr32;
2638 struct ifreq __user *uifr;
2639
2640 uifr = compat_alloc_user_space(sizeof (*uifr));
2641 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
2642 return -EFAULT;
2643
2644 if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
2645 return -EFAULT;
2646
2647 uptr = compat_ptr(uptr32);
2648
2649 if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc))
2650 return -EFAULT;
2651
2652 return dev_ioctl(net, SIOCWANDEV, uifr);
2653}
2654
2655static int bond_ioctl(struct net *net, unsigned int cmd,
2656 struct compat_ifreq __user *ifr32)
2657{
2658 struct ifreq kifr;
2659 struct ifreq __user *uifr;
2660 mm_segment_t old_fs;
2661 int err;
2662 u32 data;
2663 void __user *datap;
2664
2665 switch (cmd) {
2666 case SIOCBONDENSLAVE:
2667 case SIOCBONDRELEASE:
2668 case SIOCBONDSETHWADDR:
2669 case SIOCBONDCHANGEACTIVE:
2670 if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq)))
2671 return -EFAULT;
2672
2673 old_fs = get_fs();
2674 set_fs (KERNEL_DS);
2675 err = dev_ioctl(net, cmd, &kifr);
2676 set_fs (old_fs);
2677
2678 return err;
2679 case SIOCBONDSLAVEINFOQUERY:
2680 case SIOCBONDINFOQUERY:
2681 uifr = compat_alloc_user_space(sizeof(*uifr));
2682 if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2683 return -EFAULT;
2684
2685 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
2686 return -EFAULT;
2687
2688 datap = compat_ptr(data);
2689 if (put_user(datap, &uifr->ifr_ifru.ifru_data))
2690 return -EFAULT;
2691
2692 return dev_ioctl(net, cmd, uifr);
2693 default:
2694 return -EINVAL;
2695 };
2696}
2697
2698static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
2699 struct compat_ifreq __user *u_ifreq32)
2700{
2701 struct ifreq __user *u_ifreq64;
2702 char tmp_buf[IFNAMSIZ];
2703 void __user *data64;
2704 u32 data32;
2705
2706 if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
2707 IFNAMSIZ))
2708 return -EFAULT;
2709 if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
2710 return -EFAULT;
2711 data64 = compat_ptr(data32);
2712
2713 u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
2714
2715 /* Don't check these user accesses, just let that get trapped
2716 * in the ioctl handler instead.
2717 */
2718 if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
2719 IFNAMSIZ))
2720 return -EFAULT;
2721 if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
2722 return -EFAULT;
2723
2724 return dev_ioctl(net, cmd, u_ifreq64);
2725}
2726
2727static int dev_ifsioc(struct net *net, struct socket *sock,
2728 unsigned int cmd, struct compat_ifreq __user *uifr32)
2729{
2730 struct ifreq __user *uifr;
2731 int err;
2732
2733 uifr = compat_alloc_user_space(sizeof(*uifr));
2734 if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
2735 return -EFAULT;
2736
2737 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
2738
2739 if (!err) {
2740 switch (cmd) {
2741 case SIOCGIFFLAGS:
2742 case SIOCGIFMETRIC:
2743 case SIOCGIFMTU:
2744 case SIOCGIFMEM:
2745 case SIOCGIFHWADDR:
2746 case SIOCGIFINDEX:
2747 case SIOCGIFADDR:
2748 case SIOCGIFBRDADDR:
2749 case SIOCGIFDSTADDR:
2750 case SIOCGIFNETMASK:
2751 case SIOCGIFPFLAGS:
2752 case SIOCGIFTXQLEN:
2753 case SIOCGMIIPHY:
2754 case SIOCGMIIREG:
2755 if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
2756 err = -EFAULT;
2757 break;
2758 }
2759 }
2760 return err;
2761}
2762
/*
 * Compat handler for SIOCGIFMAP/SIOCSIFMAP.  struct ifmap holds
 * unsigned longs, so the 32-bit layout differs from the native one and
 * every member has to be converted field by field.  The converted
 * ifreq lives on the kernel stack, hence the KERNEL_DS window around
 * dev_ioctl().
 */
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
			struct compat_ifreq __user *uifr32)
{
	struct ifreq ifr;
	struct compat_ifmap __user *uifmap32;
	mm_segment_t old_fs;
	int err;

	uifmap32 = &uifr32->ifr_ifru.ifru_map;
	/* Accumulate failures: any nonzero err below means a fault. */
	err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
	err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
	err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
	err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
	err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
	err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
	err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
	if (err)
		return -EFAULT;

	old_fs = get_fs();
	set_fs (KERNEL_DS);
	err = dev_ioctl(net, cmd, (void __user *)&ifr);
	set_fs (old_fs);

	/* Only the get variant produces output to narrow back down. */
	if (cmd == SIOCGIFMAP && !err) {
		err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
		err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
		err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
		err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
		err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
		err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
		err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
		if (err)
			err = -EFAULT;
	}
	return err;
}
2800
2801static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32)
2802{
2803 void __user *uptr;
2804 compat_uptr_t uptr32;
2805 struct ifreq __user *uifr;
2806
2807 uifr = compat_alloc_user_space(sizeof (*uifr));
2808 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
2809 return -EFAULT;
2810
2811 if (get_user(uptr32, &uifr32->ifr_data))
2812 return -EFAULT;
2813
2814 uptr = compat_ptr(uptr32);
2815
2816 if (put_user(uptr, &uifr->ifr_data))
2817 return -EFAULT;
2818
2819 return dev_ioctl(net, SIOCSHWTSTAMP, uifr);
2820}
2821
/*
 * 32-bit layout of struct rtentry, the SIOCADDRT/SIOCDELRT argument.
 * Pointers and longs shrink to u32 here; routing_ioctl() converts this
 * into a native struct rtentry.
 */
struct rtentry32 {
	u32		rt_pad1;
	struct sockaddr rt_dst;         /* target address               */
	struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
	struct sockaddr rt_genmask;     /* target network mask (IP)     */
	unsigned short	rt_flags;
	short		rt_pad2;
	u32		rt_pad3;
	unsigned char	rt_tos;
	unsigned char	rt_class;
	short		rt_pad4;
	short		rt_metric;      /* +1 for binary compatibility! */
	/* char * */ u32 rt_dev;        /* forcing the device at add    */
	u32		rt_mtu;         /* per route MTU/Window         */
	u32		rt_window;      /* Window clamping              */
	unsigned short  rt_irtt;        /* Initial RTT                  */
};
2839
/*
 * 32-bit layout of struct in6_rtmsg, the IPv6 SIOCADDRT/SIOCDELRT
 * argument; routing_ioctl() widens it into a native struct in6_rtmsg.
 */
struct in6_rtmsg32 {
	struct in6_addr		rtmsg_dst;
	struct in6_addr		rtmsg_src;
	struct in6_addr		rtmsg_gateway;
	u32			rtmsg_type;
	u16			rtmsg_dst_len;
	u16			rtmsg_src_len;
	u32			rtmsg_metric;
	u32			rtmsg_info;
	u32			rtmsg_flags;
	s32			rtmsg_ifindex;
};
2852
/*
 * Compat handler for SIOCADDRT/SIOCDELRT.  Widens a 32-bit rtentry32
 * (IPv4) or in6_rtmsg32 (IPv6) into its native kernel counterpart on
 * the stack, then issues the ioctl under KERNEL_DS so the handler
 * accepts the kernel-space pointer.
 */
static int routing_ioctl(struct net *net, struct socket *sock,
			unsigned int cmd, void __user *argp)
{
	int ret;
	void *r = NULL;			/* points at r6 or r4 below */
	struct in6_rtmsg r6;
	struct rtentry r4;
	char devname[16];
	u32 rtdev;
	mm_segment_t old_fs = get_fs();

	if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
		struct in6_rtmsg32 __user *ur6 = argp;
		/* dst/src/gateway are contiguous: copy all three at once,
		 * then widen the remaining scalar fields individually. */
		ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
			3 * sizeof(struct in6_addr));
		ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
		ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
		ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
		ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
		ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
		ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
		ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));

		r = (void *) &r6;
	} else { /* ipv4 */
		struct rtentry32 __user *ur4 = argp;
		/* dst/gateway/genmask are contiguous sockaddrs. */
		ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
					3 * sizeof(struct sockaddr));
		ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
		ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
		ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
		ret |= __get_user (r4.rt_window, &(ur4->rt_window));
		ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
		ret |= __get_user (rtdev, &(ur4->rt_dev));
		if (rtdev) {
			/* rt_dev is a 32-bit user pointer to a device
			 * name: pull it into a NUL-terminated stack
			 * buffer (safe under KERNEL_DS below). */
			ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
			r4.rt_dev = devname; devname[15] = 0;
		} else
			r4.rt_dev = NULL;

		r = (void *) &r4;
	}

	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	set_fs (KERNEL_DS);
	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
	set_fs (old_fs);

out:
	return ret;
}
2908
/* Old-style bridge ioctls end up using SIOCDEVPRIVATE for some
 * operations, so refuse them here; this forces use of the newer
 * bridge-utils, which use compatible ioctls.
 */
2913static int old_bridge_ioctl(compat_ulong_t __user *argp)
2914{
2915 compat_ulong_t tmp;
2916
2917 if (get_user(tmp, argp))
2918 return -EFAULT;
2919 if (tmp == BRCTL_GET_VERSION)
2920 return BRCTL_VERSION + 1;
2921 return -EINVAL;
2922}
2923
/*
 * Dispatch a 32-bit socket ioctl to the appropriate translation
 * helper, or to the native path where the layouts already match.
 * Returns -ENOIOCTLCMD for commands we do not know about so the
 * caller can try further handlers.
 */
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
			 unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);

	/* Device-private ioctls all carry an ifreq; convert generically. */
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		return siocdevprivate_ioctl(net, cmd, argp);

	switch (cmd) {
	case SIOCSIFBR:
	case SIOCGIFBR:
		return old_bridge_ioctl(argp);
	case SIOCGIFNAME:
		return dev_ifname32(net, argp);
	case SIOCGIFCONF:
		return dev_ifconf(net, argp);
	case SIOCETHTOOL:
		return ethtool_ioctl(net, argp);
	case SIOCWANDEV:
		return compat_siocwandev(net, argp);
	case SIOCGIFMAP:
	case SIOCSIFMAP:
		return compat_sioc_ifmap(net, cmd, argp);
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
	case SIOCBONDCHANGEACTIVE:
		return bond_ioctl(net, cmd, argp);
	case SIOCADDRT:
	case SIOCDELRT:
		return routing_ioctl(net, sock, cmd, argp);
	case SIOCGSTAMP:
		return do_siocgstamp(net, sock, cmd, argp);
	case SIOCGSTAMPNS:
		return do_siocgstampns(net, sock, cmd, argp);
	case SIOCSHWTSTAMP:
		return compat_siocshwtstamp(net, argp);

	/* These need no argument translation; use the native path. */
	case FIOSETOWN:
	case SIOCSPGRP:
	case FIOGETOWN:
	case SIOCGPGRP:
	case SIOCBRADDBR:
	case SIOCBRDELBR:
	case SIOCGIFVLAN:
	case SIOCSIFVLAN:
	case SIOCADDDLCI:
	case SIOCDELDLCI:
		return sock_ioctl(file, cmd, arg);

	/* ifreq-carrying commands whose layouts differ only in size. */
	case SIOCGIFFLAGS:
	case SIOCSIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
	case SIOCGIFMTU:
	case SIOCSIFMTU:
	case SIOCGIFMEM:
	case SIOCSIFMEM:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCGIFINDEX:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCSIFHWBROADCAST:
	case SIOCDIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCSIFPFLAGS:
	case SIOCGIFPFLAGS:
	case SIOCGIFTXQLEN:
	case SIOCSIFTXQLEN:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSIFNAME:
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return dev_ifsioc(net, sock, cmd, argp);

	/* 32/64-bit layouts already agree for these. */
	case SIOCSARP:
	case SIOCGARP:
	case SIOCDARP:
	case SIOCATMARK:
		return sock_do_ioctl(net, sock, cmd, arg);
	}

	/* Prevent warning from compat_sys_ioctl, these always
	 * result in -EINVAL in the native case anyway. */
	switch (cmd) {
	case SIOCRTMSG:
	case SIOCGIFCOUNT:
	case SIOCSRARP:
	case SIOCGRARP:
	case SIOCDRARP:
	case SIOCSIFLINK:
	case SIOCGIFSLAVE:
	case SIOCSIFSLAVE:
		return -EINVAL;
	}

	return -ENOIOCTLCMD;
}
3036
2462static long compat_sock_ioctl(struct file *file, unsigned cmd, 3037static long compat_sock_ioctl(struct file *file, unsigned cmd,
2463 unsigned long arg) 3038 unsigned long arg)
2464{ 3039{
@@ -2477,6 +3052,9 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd,
2477 (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) 3052 (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
2478 ret = compat_wext_handle_ioctl(net, cmd, arg); 3053 ret = compat_wext_handle_ioctl(net, cmd, arg);
2479 3054
3055 if (ret == -ENOIOCTLCMD)
3056 ret = compat_sock_ioctl_trans(file, sock, cmd, arg);
3057
2480 return ret; 3058 return ret;
2481} 3059}
2482#endif 3060#endif
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 22e8fd89477f..c7450c8f0a7c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
306 * @sap: buffer into which to plant socket address 306 * @sap: buffer into which to plant socket address
307 * @salen: size of buffer 307 * @salen: size of buffer
308 * 308 *
309 * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
310 * rpc_pton() require proper string termination to be successful.
311 *
309 * Returns the size of the socket address if successful; otherwise 312 * Returns the size of the socket address if successful; otherwise
310 * zero is returned. 313 * zero is returned.
311 */ 314 */
312size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len, 315size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
313 struct sockaddr *sap, const size_t salen) 316 struct sockaddr *sap, const size_t salen)
314{ 317{
315 char *c, buf[RPCBIND_MAXUADDRLEN]; 318 char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
316 unsigned long portlo, porthi; 319 unsigned long portlo, porthi;
317 unsigned short port; 320 unsigned short port;
318 321
319 if (uaddr_len > sizeof(buf)) 322 if (uaddr_len > RPCBIND_MAXUADDRLEN)
320 return 0; 323 return 0;
321 324
322 memcpy(buf, uaddr, uaddr_len); 325 memcpy(buf, uaddr, uaddr_len);
323 326
324 buf[uaddr_len] = '\n'; 327 buf[uaddr_len] = '\0';
325 buf[uaddr_len + 1] = '\0';
326
327 c = strrchr(buf, '.'); 328 c = strrchr(buf, '.');
328 if (unlikely(c == NULL)) 329 if (unlikely(c == NULL))
329 return 0; 330 return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
332 if (unlikely(portlo > 255)) 333 if (unlikely(portlo > 255))
333 return 0; 334 return 0;
334 335
335 c[0] = '\n'; 336 *c = '\0';
336 c[1] = '\0';
337
338 c = strrchr(buf, '.'); 337 c = strrchr(buf, '.');
339 if (unlikely(c == NULL)) 338 if (unlikely(c == NULL))
340 return 0; 339 return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
345 344
346 port = (unsigned short)((porthi << 8) | portlo); 345 port = (unsigned short)((porthi << 8) | portlo);
347 346
348 c[0] = '\0'; 347 *c = '\0';
349
350 if (rpc_pton(buf, strlen(buf), sap, salen) == 0) 348 if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
351 return 0; 349 return 0;
352 350
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 54a4e042f104..7535a7bed2fa 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -332,9 +332,9 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
332 list_add_tail(&new->cr_lru, &free); 332 list_add_tail(&new->cr_lru, &free);
333 spin_unlock(&cache->lock); 333 spin_unlock(&cache->lock);
334found: 334found:
335 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) 335 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
336 && cred->cr_ops->cr_init != NULL 336 cred->cr_ops->cr_init != NULL &&
337 && !(flags & RPCAUTH_LOOKUP_NEW)) { 337 !(flags & RPCAUTH_LOOKUP_NEW)) {
338 int res = cred->cr_ops->cr_init(auth, cred); 338 int res = cred->cr_ops->cr_init(auth, cred);
339 if (res < 0) { 339 if (res < 0) {
340 put_rpccred(cred); 340 put_rpccred(cred);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index f160be6c1a46..17562b4c35f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -75,8 +75,8 @@ krb5_get_seq_num(struct crypto_blkcipher *key,
75 if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) 75 if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
76 return code; 76 return code;
77 77
78 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) 78 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
79 || (plain[4] != plain[7])) 79 (plain[4] != plain[7]))
80 return (s32)KG_BAD_SEQ; 80 return (s32)KG_BAD_SEQ;
81 81
82 *direction = plain[4]; 82 *direction = plain[4];
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f6c51e562a02..e34bc531fcb9 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -105,8 +105,8 @@ static int rsi_match(struct cache_head *a, struct cache_head *b)
105{ 105{
106 struct rsi *item = container_of(a, struct rsi, h); 106 struct rsi *item = container_of(a, struct rsi, h);
107 struct rsi *tmp = container_of(b, struct rsi, h); 107 struct rsi *tmp = container_of(b, struct rsi, h);
108 return netobj_equal(&item->in_handle, &tmp->in_handle) 108 return netobj_equal(&item->in_handle, &tmp->in_handle) &&
109 && netobj_equal(&item->in_token, &tmp->in_token); 109 netobj_equal(&item->in_token, &tmp->in_token);
110} 110}
111 111
112static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len) 112static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index d6eee291a0e2..39bddba53ba1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -401,9 +401,8 @@ static int cache_clean(void)
401 for (; ch; cp= & ch->next, ch= *cp) { 401 for (; ch; cp= & ch->next, ch= *cp) {
402 if (current_detail->nextcheck > ch->expiry_time) 402 if (current_detail->nextcheck > ch->expiry_time)
403 current_detail->nextcheck = ch->expiry_time+1; 403 current_detail->nextcheck = ch->expiry_time+1;
404 if (ch->expiry_time >= get_seconds() 404 if (ch->expiry_time >= get_seconds() &&
405 && ch->last_refresh >= current_detail->flush_time 405 ch->last_refresh >= current_detail->flush_time)
406 )
407 continue; 406 continue;
408 if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) 407 if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
409 cache_dequeue(current_detail, ch); 408 cache_dequeue(current_detail, ch);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 952f206ff307..538ca433a56c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1103,8 +1103,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1103 procp->pc_release(rqstp, NULL, rqstp->rq_resp); 1103 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1104 goto dropit; 1104 goto dropit;
1105 } 1105 }
1106 if (*statp == rpc_success && (xdr = procp->pc_encode) 1106 if (*statp == rpc_success &&
1107 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { 1107 (xdr = procp->pc_encode) &&
1108 !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
1108 dprintk("svc: failed to encode reply\n"); 1109 dprintk("svc: failed to encode reply\n");
1109 /* serv->sv_stats->rpcsystemerr++; */ 1110 /* serv->sv_stats->rpcsystemerr++; */
1110 *statp = rpc_system_err; 1111 *statp = rpc_system_err;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index df124f78ee48..b845e2293dfe 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -129,8 +129,8 @@ static void svc_xprt_free(struct kref *kref)
129 struct svc_xprt *xprt = 129 struct svc_xprt *xprt =
130 container_of(kref, struct svc_xprt, xpt_ref); 130 container_of(kref, struct svc_xprt, xpt_ref);
131 struct module *owner = xprt->xpt_class->xcl_owner; 131 struct module *owner = xprt->xpt_class->xcl_owner;
132 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) 132 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
133 && xprt->xpt_auth_cache != NULL) 133 xprt->xpt_auth_cache != NULL)
134 svcauth_unix_info_release(xprt->xpt_auth_cache); 134 svcauth_unix_info_release(xprt->xpt_auth_cache);
135 xprt->xpt_ops->xpo_free(xprt); 135 xprt->xpt_ops->xpo_free(xprt);
136 module_put(owner); 136 module_put(owner);
@@ -846,8 +846,8 @@ static void svc_age_temp_xprts(unsigned long closure)
846 * through, close it. */ 846 * through, close it. */
847 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) 847 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
848 continue; 848 continue;
849 if (atomic_read(&xprt->xpt_ref.refcount) > 1 849 if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
850 || test_bit(XPT_BUSY, &xprt->xpt_flags)) 850 test_bit(XPT_BUSY, &xprt->xpt_flags))
851 continue; 851 continue;
852 svc_xprt_get(xprt); 852 svc_xprt_get(xprt);
853 list_move(le, &to_be_aged); 853 list_move(le, &to_be_aged);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index e64109b02aee..4e9393c24687 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -46,8 +46,8 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
46 dprintk("svc: svc_authenticate (%d)\n", flavor); 46 dprintk("svc: svc_authenticate (%d)\n", flavor);
47 47
48 spin_lock(&authtab_lock); 48 spin_lock(&authtab_lock);
49 if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor]) 49 if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor]) ||
50 || !try_module_get(aops->owner)) { 50 !try_module_get(aops->owner)) {
51 spin_unlock(&authtab_lock); 51 spin_unlock(&authtab_lock);
52 *authp = rpc_autherr_badcred; 52 *authp = rpc_autherr_badcred;
53 return SVC_DENIED; 53 return SVC_DENIED;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index f4c7ff3a53e6..4a8f6558718a 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -125,8 +125,8 @@ static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
125{ 125{
126 struct ip_map *orig = container_of(corig, struct ip_map, h); 126 struct ip_map *orig = container_of(corig, struct ip_map, h);
127 struct ip_map *new = container_of(cnew, struct ip_map, h); 127 struct ip_map *new = container_of(cnew, struct ip_map, h);
128 return strcmp(orig->m_class, new->m_class) == 0 128 return strcmp(orig->m_class, new->m_class) == 0 &&
129 && ipv6_addr_equal(&orig->m_addr, &new->m_addr); 129 ipv6_addr_equal(&orig->m_addr, &new->m_addr);
130} 130}
131static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) 131static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
132{ 132{
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c2a17876defd..870929e08e5d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -111,7 +111,7 @@ static void svc_release_skb(struct svc_rqst *rqstp)
111 rqstp->rq_xprt_ctxt = NULL; 111 rqstp->rq_xprt_ctxt = NULL;
112 112
113 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); 113 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
114 skb_free_datagram(svsk->sk_sk, skb); 114 skb_free_datagram_locked(svsk->sk_sk, skb);
115 } 115 }
116} 116}
117 117
@@ -578,7 +578,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
578 "svc: received unknown control message %d/%d; " 578 "svc: received unknown control message %d/%d; "
579 "dropping RPC reply datagram\n", 579 "dropping RPC reply datagram\n",
580 cmh->cmsg_level, cmh->cmsg_type); 580 cmh->cmsg_level, cmh->cmsg_type);
581 skb_free_datagram(svsk->sk_sk, skb); 581 skb_free_datagram_locked(svsk->sk_sk, skb);
582 return 0; 582 return 0;
583 } 583 }
584 584
@@ -588,18 +588,18 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
588 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { 588 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
589 local_bh_enable(); 589 local_bh_enable();
590 /* checksum error */ 590 /* checksum error */
591 skb_free_datagram(svsk->sk_sk, skb); 591 skb_free_datagram_locked(svsk->sk_sk, skb);
592 return 0; 592 return 0;
593 } 593 }
594 local_bh_enable(); 594 local_bh_enable();
595 skb_free_datagram(svsk->sk_sk, skb); 595 skb_free_datagram_locked(svsk->sk_sk, skb);
596 } else { 596 } else {
597 /* we can use it in-place */ 597 /* we can use it in-place */
598 rqstp->rq_arg.head[0].iov_base = skb->data + 598 rqstp->rq_arg.head[0].iov_base = skb->data +
599 sizeof(struct udphdr); 599 sizeof(struct udphdr);
600 rqstp->rq_arg.head[0].iov_len = len; 600 rqstp->rq_arg.head[0].iov_len = len;
601 if (skb_checksum_complete(skb)) { 601 if (skb_checksum_complete(skb)) {
602 skb_free_datagram(svsk->sk_sk, skb); 602 skb_free_datagram_locked(svsk->sk_sk, skb);
603 return 0; 603 return 0;
604 } 604 }
605 rqstp->rq_xprt_ctxt = skb; 605 rqstp->rq_xprt_ctxt = skb;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 9e884383134f..f92e37eb413c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -337,10 +337,9 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
337 337
338static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) 338static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
339{ 339{
340 if ((RDMA_TRANSPORT_IWARP == 340 if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
341 rdma_node_get_transport(xprt->sc_cm_id-> 341 RDMA_TRANSPORT_IWARP) &&
342 device->node_type)) 342 sge_count > 1)
343 && sge_count > 1)
344 return 1; 343 return 1;
345 else 344 else
346 return min_t(int, sge_count, xprt->sc_max_sge); 345 return min_t(int, sge_count, xprt->sc_max_sge);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 0cf5e8c27a10..3fa5751af0ec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -42,6 +42,7 @@
42#include <linux/sunrpc/svc_xprt.h> 42#include <linux/sunrpc/svc_xprt.h>
43#include <linux/sunrpc/debug.h> 43#include <linux/sunrpc/debug.h>
44#include <linux/sunrpc/rpc_rdma.h> 44#include <linux/sunrpc/rpc_rdma.h>
45#include <linux/sched.h>
45#include <linux/spinlock.h> 46#include <linux/spinlock.h>
46#include <rdma/ib_verbs.h> 47#include <rdma/ib_verbs.h>
47#include <rdma/rdma_cm.h> 48#include <rdma/rdma_cm.h>
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 465aafc2007f..2209aa87d899 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -878,8 +878,8 @@ if (strnicmp(ia->ri_id->device->dma_device->bus->name, "pci", 3) == 0) {
878 * others indicate a transport condition which has already 878 * others indicate a transport condition which has already
879 * undergone a best-effort. 879 * undergone a best-effort.
880 */ 880 */
881 if (ep->rep_connected == -ECONNREFUSED 881 if (ep->rep_connected == -ECONNREFUSED &&
882 && ++retry_count <= RDMA_CONNECT_RETRY_MAX) { 882 ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
883 dprintk("RPC: %s: non-peer_reject, retry\n", __func__); 883 dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
884 goto retry; 884 goto retry;
885 } 885 }
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 689fdefe9d04..a7eac00cd363 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -437,11 +437,11 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf)
437 break; 437 break;
438 case ROUTE_ADDITION: 438 case ROUTE_ADDITION:
439 if (!is_slave(tipc_own_addr)) { 439 if (!is_slave(tipc_own_addr)) {
440 assert(!in_own_cluster(c_ptr->addr) 440 assert(!in_own_cluster(c_ptr->addr) ||
441 || is_slave(rem_node)); 441 is_slave(rem_node));
442 } else { 442 } else {
443 assert(in_own_cluster(c_ptr->addr) 443 assert(in_own_cluster(c_ptr->addr) &&
444 && !is_slave(rem_node)); 444 !is_slave(rem_node));
445 } 445 }
446 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 446 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
447 if (!n_ptr) 447 if (!n_ptr)
@@ -451,11 +451,11 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf)
451 break; 451 break;
452 case ROUTE_REMOVAL: 452 case ROUTE_REMOVAL:
453 if (!is_slave(tipc_own_addr)) { 453 if (!is_slave(tipc_own_addr)) {
454 assert(!in_own_cluster(c_ptr->addr) 454 assert(!in_own_cluster(c_ptr->addr) ||
455 || is_slave(rem_node)); 455 is_slave(rem_node));
456 } else { 456 } else {
457 assert(in_own_cluster(c_ptr->addr) 457 assert(in_own_cluster(c_ptr->addr) &&
458 && !is_slave(rem_node)); 458 !is_slave(rem_node));
459 } 459 }
460 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 460 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
461 if (n_ptr) 461 if (n_ptr)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index dd4c18b9a35b..6f50f6423f63 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -378,8 +378,8 @@ static void link_timeout(struct link *l_ptr)
378 struct tipc_msg *msg = buf_msg(l_ptr->first_out); 378 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
379 u32 length = msg_size(msg); 379 u32 length = msg_size(msg);
380 380
381 if ((msg_user(msg) == MSG_FRAGMENTER) 381 if ((msg_user(msg) == MSG_FRAGMENTER) &&
382 && (msg_type(msg) == FIRST_FRAGMENT)) { 382 (msg_type(msg) == FIRST_FRAGMENT)) {
383 length = msg_size(msg_get_wrapped(msg)); 383 length = msg_size(msg_get_wrapped(msg));
384 } 384 }
385 if (length) { 385 if (length) {
@@ -2788,8 +2788,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2788 2788
2789 /* Is there an incomplete message waiting for this fragment? */ 2789 /* Is there an incomplete message waiting for this fragment? */
2790 2790
2791 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) 2791 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2792 || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { 2792 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2793 prev = pbuf; 2793 prev = pbuf;
2794 pbuf = pbuf->next; 2794 pbuf = pbuf->next;
2795 } 2795 }
@@ -3325,8 +3325,8 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
3325 (l_ptr->last_out)), l_ptr->out_queue_size); 3325 (l_ptr->last_out)), l_ptr->out_queue_size);
3326 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 3326 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3327 msg_seqno(buf_msg(l_ptr->first_out))) 3327 msg_seqno(buf_msg(l_ptr->first_out)))
3328 != (l_ptr->out_queue_size - 1)) 3328 != (l_ptr->out_queue_size - 1)) ||
3329 || (l_ptr->last_out->next != NULL)) { 3329 (l_ptr->last_out->next != NULL)) {
3330 tipc_printf(buf, "\nSend queue inconsistency\n"); 3330 tipc_printf(buf, "\nSend queue inconsistency\n");
3331 tipc_printf(buf, "first_out= %x ", l_ptr->first_out); 3331 tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
3332 tipc_printf(buf, "next_out= %x ", l_ptr->next_out); 3332 tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e6d9abf7440e..1ea64f09cc45 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -177,6 +177,7 @@ static void reject_rx_queue(struct sock *sk)
177 * @net: network namespace (must be default network) 177 * @net: network namespace (must be default network)
178 * @sock: pre-allocated socket structure 178 * @sock: pre-allocated socket structure
179 * @protocol: protocol indicator (must be 0) 179 * @protocol: protocol indicator (must be 0)
180 * @kern: caused by kernel or by userspace?
180 * 181 *
181 * This routine creates additional data structures used by the TIPC socket, 182 * This routine creates additional data structures used by the TIPC socket,
182 * initializes them, and links them together. 183 * initializes them, and links them together.
@@ -184,7 +185,8 @@ static void reject_rx_queue(struct sock *sk)
184 * Returns 0 on success, errno otherwise 185 * Returns 0 on success, errno otherwise
185 */ 186 */
186 187
187static int tipc_create(struct net *net, struct socket *sock, int protocol) 188static int tipc_create(struct net *net, struct socket *sock, int protocol,
189 int kern)
188{ 190{
189 const struct proto_ops *ops; 191 const struct proto_ops *ops;
190 socket_state state; 192 socket_state state;
@@ -193,7 +195,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
193 195
194 /* Validate arguments */ 196 /* Validate arguments */
195 197
196 if (net != &init_net) 198 if (!net_eq(net, &init_net))
197 return -EAFNOSUPPORT; 199 return -EAFNOSUPPORT;
198 200
199 if (unlikely(protocol != 0)) 201 if (unlikely(protocol != 0))
@@ -1134,13 +1136,11 @@ restart:
1134 1136
1135 /* Loop around if more data is required */ 1137 /* Loop around if more data is required */
1136 1138
1137 if ((sz_copied < buf_len) /* didn't get all requested data */ 1139 if ((sz_copied < buf_len) && /* didn't get all requested data */
1138 && (!skb_queue_empty(&sk->sk_receive_queue) || 1140 (!skb_queue_empty(&sk->sk_receive_queue) ||
1139 (flags & MSG_WAITALL)) 1141 (flags & MSG_WAITALL)) && /* and more is ready or required */
1140 /* ... and more is ready or required */ 1142 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1141 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ 1143 (!err)) /* and haven't reached a FIN */
1142 && (!err) /* ... and haven't reached a FIN */
1143 )
1144 goto restart; 1144 goto restart;
1145 1145
1146exit: 1146exit:
@@ -1528,7 +1528,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1528 1528
1529 buf = skb_peek(&sk->sk_receive_queue); 1529 buf = skb_peek(&sk->sk_receive_queue);
1530 1530
1531 res = tipc_create(sock_net(sock->sk), new_sock, 0); 1531 res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
1532 if (!res) { 1532 if (!res) {
1533 struct sock *new_sk = new_sock->sk; 1533 struct sock *new_sk = new_sock->sk;
1534 struct tipc_sock *new_tsock = tipc_sk(new_sk); 1534 struct tipc_sock *new_tsock = tipc_sk(new_sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0747d8a9232f..ac91f0dfa144 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -364,9 +364,9 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
364 sub->seq.upper = htohl(s->seq.upper, swap); 364 sub->seq.upper = htohl(s->seq.upper, swap);
365 sub->timeout = htohl(s->timeout, swap); 365 sub->timeout = htohl(s->timeout, swap);
366 sub->filter = htohl(s->filter, swap); 366 sub->filter = htohl(s->filter, swap);
367 if ((!(sub->filter & TIPC_SUB_PORTS) 367 if ((!(sub->filter & TIPC_SUB_PORTS) ==
368 == !(sub->filter & TIPC_SUB_SERVICE)) 368 !(sub->filter & TIPC_SUB_SERVICE)) ||
369 || (sub->seq.lower > sub->seq.upper)) { 369 (sub->seq.lower > sub->seq.upper)) {
370 warn("Subscription rejected, illegal request\n"); 370 warn("Subscription rejected, illegal request\n");
371 kfree(sub); 371 kfree(sub);
372 subscr_terminate(subscriber); 372 subscr_terminate(subscriber);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3291902f0b88..f25511903115 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -621,7 +621,8 @@ out:
621 return sk; 621 return sk;
622} 622}
623 623
624static int unix_create(struct net *net, struct socket *sock, int protocol) 624static int unix_create(struct net *net, struct socket *sock, int protocol,
625 int kern)
625{ 626{
626 if (protocol && protocol != PF_UNIX) 627 if (protocol && protocol != PF_UNIX)
627 return -EPROTONOSUPPORT; 628 return -EPROTONOSUPPORT;
@@ -1032,8 +1033,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1032 goto out; 1033 goto out;
1033 addr_len = err; 1034 addr_len = err;
1034 1035
1035 if (test_bit(SOCK_PASSCRED, &sock->flags) 1036 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1036 && !u->addr && (err = unix_autobind(sock)) != 0) 1037 (err = unix_autobind(sock)) != 0)
1037 goto out; 1038 goto out;
1038 1039
1039 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 1040 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
@@ -1258,7 +1259,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
1258{ 1259{
1259 struct sock *sk = sock->sk; 1260 struct sock *sk = sock->sk;
1260 struct unix_sock *u; 1261 struct unix_sock *u;
1261 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 1262 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1262 int err = 0; 1263 int err = 0;
1263 1264
1264 if (peer) { 1265 if (peer) {
@@ -1377,8 +1378,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1377 goto out; 1378 goto out;
1378 } 1379 }
1379 1380
1380 if (test_bit(SOCK_PASSCRED, &sock->flags) 1381 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1381 && !u->addr && (err = unix_autobind(sock)) != 0) 1382 && (err = unix_autobind(sock)) != 0)
1382 goto out; 1383 goto out;
1383 1384
1384 err = -EMSGSIZE; 1385 err = -EMSGSIZE;
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d631a17186bc..d3bfb6ef13ae 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -388,6 +388,8 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
388 } 388 }
389 mutex_lock(&wimax_dev->mutex); 389 mutex_lock(&wimax_dev->mutex);
390 result = wimax_dev_is_ready(wimax_dev); 390 result = wimax_dev_is_ready(wimax_dev);
391 if (result == -ENOMEDIUM)
392 result = 0;
391 if (result < 0) 393 if (result < 0)
392 goto error_not_ready; 394 goto error_not_ready;
393 result = -ENOSYS; 395 result = -ENOSYS;
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 70ef4df863b9..ae752a64d920 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -107,8 +107,8 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
107 107
108 if (state != wimax_dev->rf_hw) { 108 if (state != wimax_dev->rf_hw) {
109 wimax_dev->rf_hw = state; 109 wimax_dev->rf_hw = state;
110 if (wimax_dev->rf_hw == WIMAX_RF_ON 110 if (wimax_dev->rf_hw == WIMAX_RF_ON &&
111 && wimax_dev->rf_sw == WIMAX_RF_ON) 111 wimax_dev->rf_sw == WIMAX_RF_ON)
112 wimax_state = WIMAX_ST_READY; 112 wimax_state = WIMAX_ST_READY;
113 else 113 else
114 wimax_state = WIMAX_ST_RADIO_OFF; 114 wimax_state = WIMAX_ST_RADIO_OFF;
@@ -163,8 +163,8 @@ void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
163 163
164 if (state != wimax_dev->rf_sw) { 164 if (state != wimax_dev->rf_sw) {
165 wimax_dev->rf_sw = state; 165 wimax_dev->rf_sw = state;
166 if (wimax_dev->rf_hw == WIMAX_RF_ON 166 if (wimax_dev->rf_hw == WIMAX_RF_ON &&
167 && wimax_dev->rf_sw == WIMAX_RF_ON) 167 wimax_dev->rf_sw == WIMAX_RF_ON)
168 wimax_state = WIMAX_ST_READY; 168 wimax_state = WIMAX_ST_READY;
169 else 169 else
170 wimax_state = WIMAX_ST_RADIO_OFF; 170 wimax_state = WIMAX_ST_RADIO_OFF;
@@ -305,8 +305,15 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
305 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 305 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
306 mutex_lock(&wimax_dev->mutex); 306 mutex_lock(&wimax_dev->mutex);
307 result = wimax_dev_is_ready(wimax_dev); 307 result = wimax_dev_is_ready(wimax_dev);
308 if (result < 0) 308 if (result < 0) {
309 /* While initializing, < 1.4.3 wimax-tools versions use
310 * this call to check if the device is a valid WiMAX
311 * device; so we allow it to proceed always,
312 * considering the radios are all off. */
313 if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
314 result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
309 goto error_not_ready; 315 goto error_not_ready;
316 }
310 switch (state) { 317 switch (state) {
311 case WIMAX_RF_ON: 318 case WIMAX_RF_ON:
312 case WIMAX_RF_OFF: 319 case WIMAX_RF_OFF:
@@ -355,6 +362,7 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
355 362
356 wimax_dev->rfkill = rfkill; 363 wimax_dev->rfkill = rfkill;
357 364
365 rfkill_init_sw_state(rfkill, 1);
358 result = rfkill_register(wimax_dev->rfkill); 366 result = rfkill_register(wimax_dev->rfkill);
359 if (result < 0) 367 if (result < 0)
360 goto error_rfkill_register; 368 goto error_rfkill_register;
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 79fb7d7c640f..c8866412f830 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -60,6 +60,14 @@
60#define D_SUBMODULE stack 60#define D_SUBMODULE stack
61#include "debug-levels.h" 61#include "debug-levels.h"
62 62
63static char wimax_debug_params[128];
64module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
65 0644);
66MODULE_PARM_DESC(debug,
67 "String of space-separated NAME:VALUE pairs, where NAMEs "
68 "are the different debug submodules and VALUE are the "
69 "initial debug value to set.");
70
63/* 71/*
64 * Authoritative source for the RE_STATE_CHANGE attribute policy 72 * Authoritative source for the RE_STATE_CHANGE attribute policy
65 * 73 *
@@ -562,6 +570,9 @@ int __init wimax_subsys_init(void)
562 int result, cnt; 570 int result, cnt;
563 571
564 d_fnstart(4, NULL, "()\n"); 572 d_fnstart(4, NULL, "()\n");
573 d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
574 "wimax.debug");
575
565 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), 576 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
566 "WiMAX"); 577 "WiMAX");
567 result = genl_register_family(&wimax_gnl_family); 578 result = genl_register_family(&wimax_gnl_family);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 614bdcec1c80..90e93a5701aa 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -74,12 +74,6 @@ config CFG80211_REG_DEBUG
74 74
75 If unsure, say N. 75 If unsure, say N.
76 76
77config CFG80211_DEFAULT_PS_VALUE
78 int
79 default 1 if CFG80211_DEFAULT_PS
80 default 0
81 depends on CFG80211
82
83config CFG80211_DEFAULT_PS 77config CFG80211_DEFAULT_PS
84 bool "enable powersave by default" 78 bool "enable powersave by default"
85 depends on CFG80211 79 depends on CFG80211
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 07252967be9c..fe6f402a22af 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -14,6 +14,7 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/sched.h>
17#include <net/genetlink.h> 18#include <net/genetlink.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
19#include "nl80211.h" 20#include "nl80211.h"
@@ -230,7 +231,7 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
230 struct wireless_dev *wdev; 231 struct wireless_dev *wdev;
231 int err = 0; 232 int err = 0;
232 233
233 if (!rdev->wiphy.netnsok) 234 if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
234 return -EOPNOTSUPP; 235 return -EOPNOTSUPP;
235 236
236 list_for_each_entry(wdev, &rdev->netdev_list, list) { 237 list_for_each_entry(wdev, &rdev->netdev_list, list) {
@@ -367,7 +368,9 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
367 rdev->wiphy.dev.class = &ieee80211_class; 368 rdev->wiphy.dev.class = &ieee80211_class;
368 rdev->wiphy.dev.platform_data = rdev; 369 rdev->wiphy.dev.platform_data = rdev;
369 370
370 rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE; 371#ifdef CONFIG_CFG80211_DEFAULT_PS
372 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
373#endif
371 374
372 wiphy_net_set(&rdev->wiphy, &init_net); 375 wiphy_net_set(&rdev->wiphy, &init_net);
373 376
@@ -482,7 +485,7 @@ int wiphy_register(struct wiphy *wiphy)
482 if (IS_ERR(rdev->wiphy.debugfsdir)) 485 if (IS_ERR(rdev->wiphy.debugfsdir))
483 rdev->wiphy.debugfsdir = NULL; 486 rdev->wiphy.debugfsdir = NULL;
484 487
485 if (wiphy->custom_regulatory) { 488 if (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
486 struct regulatory_request request; 489 struct regulatory_request request;
487 490
488 request.wiphy_idx = get_wiphy_idx(wiphy); 491 request.wiphy_idx = get_wiphy_idx(wiphy);
@@ -546,7 +549,7 @@ void wiphy_unregister(struct wiphy *wiphy)
546 * First remove the hardware from everywhere, this makes 549 * First remove the hardware from everywhere, this makes
547 * it impossible to find from userspace. 550 * it impossible to find from userspace.
548 */ 551 */
549 cfg80211_debugfs_rdev_del(rdev); 552 debugfs_remove_recursive(rdev->wiphy.debugfsdir);
550 list_del(&rdev->list); 553 list_del(&rdev->list);
551 554
552 /* 555 /*
@@ -569,7 +572,6 @@ void wiphy_unregister(struct wiphy *wiphy)
569 572
570 cfg80211_rdev_list_generation++; 573 cfg80211_rdev_list_generation++;
571 device_del(&rdev->wiphy.dev); 574 device_del(&rdev->wiphy.dev);
572 debugfs_remove(rdev->wiphy.debugfsdir);
573 575
574 mutex_unlock(&cfg80211_mutex); 576 mutex_unlock(&cfg80211_mutex);
575 577
@@ -681,7 +683,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
681 wdev->wext.default_key = -1; 683 wdev->wext.default_key = -1;
682 wdev->wext.default_mgmt_key = -1; 684 wdev->wext.default_mgmt_key = -1;
683 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 685 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
684 wdev->wext.ps = wdev->wiphy->ps_default; 686 if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
687 wdev->wext.ps = true;
688 else
689 wdev->wext.ps = false;
685 wdev->wext.ps_timeout = 100; 690 wdev->wext.ps_timeout = 100;
686 if (rdev->ops->set_power_mgmt) 691 if (rdev->ops->set_power_mgmt)
687 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 692 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
@@ -693,6 +698,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
693#endif 698#endif
694 if (!dev->ethtool_ops) 699 if (!dev->ethtool_ops)
695 dev->ethtool_ops = &cfg80211_ethtool_ops; 700 dev->ethtool_ops = &cfg80211_ethtool_ops;
701
702 if ((wdev->iftype == NL80211_IFTYPE_STATION ||
703 wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
704 dev->priv_flags |= IFF_DONT_BRIDGE;
696 break; 705 break;
697 case NETDEV_GOING_DOWN: 706 case NETDEV_GOING_DOWN:
698 switch (wdev->iftype) { 707 switch (wdev->iftype) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 2a33d8bc886b..a9db9e6255bb 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -72,17 +72,6 @@ struct cfg80211_registered_device {
72 /* current channel */ 72 /* current channel */
73 struct ieee80211_channel *channel; 73 struct ieee80211_channel *channel;
74 74
75#ifdef CONFIG_CFG80211_DEBUGFS
76 /* Debugfs entries */
77 struct wiphy_debugfsdentries {
78 struct dentry *rts_threshold;
79 struct dentry *fragmentation_threshold;
80 struct dentry *short_retry_limit;
81 struct dentry *long_retry_limit;
82 struct dentry *ht40allow_map;
83 } debugfs;
84#endif
85
86 /* must be last because of the way we do wiphy_priv(), 75 /* must be last because of the way we do wiphy_priv(),
87 * and it should at least be aligned to NETDEV_ALIGN */ 76 * and it should at least be aligned to NETDEV_ALIGN */
88 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 77 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -284,6 +273,8 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
284 struct cfg80211_ibss_params *params, 273 struct cfg80211_ibss_params *params,
285 struct cfg80211_cached_keys *connkeys); 274 struct cfg80211_cached_keys *connkeys);
286void cfg80211_clear_ibss(struct net_device *dev, bool nowext); 275void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
276int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
277 struct net_device *dev, bool nowext);
287int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, 278int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
288 struct net_device *dev, bool nowext); 279 struct net_device *dev, bool nowext);
289void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid); 280void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
@@ -358,6 +349,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
358 struct wireless_dev *wdev); 349 struct wireless_dev *wdev);
359 350
360void cfg80211_conn_work(struct work_struct *work); 351void cfg80211_conn_work(struct work_struct *work);
352void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
361bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); 353bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
362 354
363/* internal helpers */ 355/* internal helpers */
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 13d93d84f902..2e4895615037 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -104,11 +104,7 @@ static const struct file_operations ht40allow_map_ops = {
104}; 104};
105 105
106#define DEBUGFS_ADD(name) \ 106#define DEBUGFS_ADD(name) \
107 rdev->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \ 107 debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);
108 &rdev->wiphy, &name## _ops);
109#define DEBUGFS_DEL(name) \
110 debugfs_remove(rdev->debugfs.name); \
111 rdev->debugfs.name = NULL;
112 108
113void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) 109void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
114{ 110{
@@ -120,12 +116,3 @@ void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
120 DEBUGFS_ADD(long_retry_limit); 116 DEBUGFS_ADD(long_retry_limit);
121 DEBUGFS_ADD(ht40allow_map); 117 DEBUGFS_ADD(ht40allow_map);
122} 118}
123
124void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev)
125{
126 DEBUGFS_DEL(rts_threshold);
127 DEBUGFS_DEL(fragmentation_threshold);
128 DEBUGFS_DEL(short_retry_limit);
129 DEBUGFS_DEL(long_retry_limit);
130 DEBUGFS_DEL(ht40allow_map);
131}
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
index 6419b6d6ce3e..74fdd3811427 100644
--- a/net/wireless/debugfs.h
+++ b/net/wireless/debugfs.h
@@ -3,12 +3,9 @@
3 3
4#ifdef CONFIG_CFG80211_DEBUGFS 4#ifdef CONFIG_CFG80211_DEBUGFS
5void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); 5void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev);
6void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev);
7#else 6#else
8static inline 7static inline
9void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} 8void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {}
10static inline
11void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) {}
12#endif 9#endif
13 10
14#endif /* __CFG80211_DEBUGFS_H */ 11#endif /* __CFG80211_DEBUGFS_H */
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39b6d92e2828..34dfc93fa713 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -169,8 +169,8 @@ void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
169 wdev_unlock(wdev); 169 wdev_unlock(wdev);
170} 170}
171 171
172static int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, 172int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
173 struct net_device *dev, bool nowext) 173 struct net_device *dev, bool nowext)
174{ 174{
175 struct wireless_dev *wdev = dev->ieee80211_ptr; 175 struct wireless_dev *wdev = dev->ieee80211_ptr;
176 int err; 176 int err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1f87b4e7f4f7..1001db4912f7 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -94,6 +94,13 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
94 } 94 }
95 95
96 WARN_ON(!bss); 96 WARN_ON(!bss);
97 } else if (wdev->conn) {
98 cfg80211_sme_failed_assoc(wdev);
99 /*
100 * do not call connect_result() now because the
101 * sme will schedule work that does it later.
102 */
103 goto out;
97 } 104 }
98 105
99 if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { 106 if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
@@ -236,21 +243,12 @@ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
236} 243}
237EXPORT_SYMBOL(cfg80211_send_disassoc); 244EXPORT_SYMBOL(cfg80211_send_disassoc);
238 245
239void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 246static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
240{ 247{
241 struct wireless_dev *wdev = dev->ieee80211_ptr;
242 struct wiphy *wiphy = wdev->wiphy;
243 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
244 int i; 248 int i;
245 bool done = false; 249 bool done = false;
246 250
247 wdev_lock(wdev); 251 ASSERT_WDEV_LOCK(wdev);
248
249 nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
250 if (wdev->sme_state == CFG80211_SME_CONNECTING)
251 __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
252 WLAN_STATUS_UNSPECIFIED_FAILURE,
253 false, NULL);
254 252
255 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) { 253 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
256 if (wdev->authtry_bsses[i] && 254 if (wdev->authtry_bsses[i] &&
@@ -265,6 +263,29 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
265 } 263 }
266 264
267 WARN_ON(!done); 265 WARN_ON(!done);
266}
267
268void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr)
269{
270 __cfg80211_auth_remove(dev->ieee80211_ptr, addr);
271}
272EXPORT_SYMBOL(__cfg80211_auth_canceled);
273
274void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
275{
276 struct wireless_dev *wdev = dev->ieee80211_ptr;
277 struct wiphy *wiphy = wdev->wiphy;
278 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
279
280 wdev_lock(wdev);
281
282 nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
283 if (wdev->sme_state == CFG80211_SME_CONNECTING)
284 __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
285 WLAN_STATUS_UNSPECIFIED_FAILURE,
286 false, NULL);
287
288 __cfg80211_auth_remove(wdev, addr);
268 289
269 wdev_unlock(wdev); 290 wdev_unlock(wdev);
270} 291}
@@ -439,12 +460,23 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
439 struct cfg80211_assoc_request req; 460 struct cfg80211_assoc_request req;
440 struct cfg80211_internal_bss *bss; 461 struct cfg80211_internal_bss *bss;
441 int i, err, slot = -1; 462 int i, err, slot = -1;
463 bool was_connected = false;
442 464
443 ASSERT_WDEV_LOCK(wdev); 465 ASSERT_WDEV_LOCK(wdev);
444 466
445 memset(&req, 0, sizeof(req)); 467 memset(&req, 0, sizeof(req));
446 468
447 if (wdev->current_bss) 469 if (wdev->current_bss && prev_bssid &&
470 memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) {
471 /*
472 * Trying to reassociate: Allow this to proceed and let the old
473 * association to be dropped when the new one is completed.
474 */
475 if (wdev->sme_state == CFG80211_SME_CONNECTED) {
476 was_connected = true;
477 wdev->sme_state = CFG80211_SME_CONNECTING;
478 }
479 } else if (wdev->current_bss)
448 return -EALREADY; 480 return -EALREADY;
449 481
450 req.ie = ie; 482 req.ie = ie;
@@ -454,8 +486,11 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
454 req.prev_bssid = prev_bssid; 486 req.prev_bssid = prev_bssid;
455 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, 487 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
456 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 488 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
457 if (!req.bss) 489 if (!req.bss) {
490 if (was_connected)
491 wdev->sme_state = CFG80211_SME_CONNECTED;
458 return -ENOENT; 492 return -ENOENT;
493 }
459 494
460 bss = bss_from_pub(req.bss); 495 bss = bss_from_pub(req.bss);
461 496
@@ -473,6 +508,8 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
473 508
474 err = rdev->ops->assoc(&rdev->wiphy, dev, &req); 509 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
475 out: 510 out:
511 if (err && was_connected)
512 wdev->sme_state = CFG80211_SME_CONNECTED;
476 /* still a reference in wdev->auth_bsses[slot] */ 513 /* still a reference in wdev->auth_bsses[slot] */
477 cfg80211_put_bss(req.bss); 514 cfg80211_put_bss(req.bss);
478 return err; 515 return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f48394126bf9..149539ade15e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -138,6 +138,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
138 [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, 138 [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
139 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, 139 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
140 [NL80211_ATTR_PID] = { .type = NLA_U32 }, 140 [NL80211_ATTR_PID] = { .type = NLA_U32 },
141 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
141}; 142};
142 143
143/* policy for the attributes */ 144/* policy for the attributes */
@@ -151,6 +152,26 @@ nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
151 [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, 152 [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
152}; 153};
153 154
155/* ifidx get helper */
156static int nl80211_get_ifidx(struct netlink_callback *cb)
157{
158 int res;
159
160 res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
161 nl80211_fam.attrbuf, nl80211_fam.maxattr,
162 nl80211_policy);
163 if (res)
164 return res;
165
166 if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
167 return -EINVAL;
168
169 res = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
170 if (!res)
171 return -EINVAL;
172 return res;
173}
174
154/* IE validation */ 175/* IE validation */
155static bool is_valid_ie_attr(const struct nlattr *attr) 176static bool is_valid_ie_attr(const struct nlattr *attr)
156{ 177{
@@ -540,7 +561,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
540 CMD(deauth, DEAUTHENTICATE); 561 CMD(deauth, DEAUTHENTICATE);
541 CMD(disassoc, DISASSOCIATE); 562 CMD(disassoc, DISASSOCIATE);
542 CMD(join_ibss, JOIN_IBSS); 563 CMD(join_ibss, JOIN_IBSS);
543 if (dev->wiphy.netnsok) { 564 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
544 i++; 565 i++;
545 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 566 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
546 } 567 }
@@ -947,6 +968,32 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
947 return 0; 968 return 0;
948} 969}
949 970
971static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
972 struct net_device *netdev, u8 use_4addr,
973 enum nl80211_iftype iftype)
974{
975 if (!use_4addr) {
976 if (netdev && netdev->br_port)
977 return -EBUSY;
978 return 0;
979 }
980
981 switch (iftype) {
982 case NL80211_IFTYPE_AP_VLAN:
983 if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)
984 return 0;
985 break;
986 case NL80211_IFTYPE_STATION:
987 if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION)
988 return 0;
989 break;
990 default:
991 break;
992 }
993
994 return -EOPNOTSUPP;
995}
996
950static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) 997static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
951{ 998{
952 struct cfg80211_registered_device *rdev; 999 struct cfg80211_registered_device *rdev;
@@ -987,6 +1034,16 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
987 change = true; 1034 change = true;
988 } 1035 }
989 1036
1037 if (info->attrs[NL80211_ATTR_4ADDR]) {
1038 params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
1039 change = true;
1040 err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype);
1041 if (err)
1042 goto unlock;
1043 } else {
1044 params.use_4addr = -1;
1045 }
1046
990 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { 1047 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
991 if (ntype != NL80211_IFTYPE_MONITOR) { 1048 if (ntype != NL80211_IFTYPE_MONITOR) {
992 err = -EINVAL; 1049 err = -EINVAL;
@@ -1006,6 +1063,9 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
1006 else 1063 else
1007 err = 0; 1064 err = 0;
1008 1065
1066 if (!err && params.use_4addr != -1)
1067 dev->ieee80211_ptr->use_4addr = params.use_4addr;
1068
1009 unlock: 1069 unlock:
1010 dev_put(dev); 1070 dev_put(dev);
1011 cfg80211_unlock_rdev(rdev); 1071 cfg80211_unlock_rdev(rdev);
@@ -1053,6 +1113,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
1053 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); 1113 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
1054 } 1114 }
1055 1115
1116 if (info->attrs[NL80211_ATTR_4ADDR]) {
1117 params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
1118 err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type);
1119 if (err)
1120 goto unlock;
1121 }
1122
1056 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 1123 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
1057 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 1124 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
1058 &flags); 1125 &flags);
@@ -1682,20 +1749,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
1682 int sta_idx = cb->args[1]; 1749 int sta_idx = cb->args[1];
1683 int err; 1750 int err;
1684 1751
1685 if (!ifidx) { 1752 if (!ifidx)
1686 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 1753 ifidx = nl80211_get_ifidx(cb);
1687 nl80211_fam.attrbuf, nl80211_fam.maxattr, 1754 if (ifidx < 0)
1688 nl80211_policy); 1755 return ifidx;
1689 if (err)
1690 return err;
1691
1692 if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
1693 return -EINVAL;
1694
1695 ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
1696 if (!ifidx)
1697 return -EINVAL;
1698 }
1699 1756
1700 rtnl_lock(); 1757 rtnl_lock();
1701 1758
@@ -1800,7 +1857,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
1800} 1857}
1801 1858
1802/* 1859/*
1803 * Get vlan interface making sure it is on the right wiphy. 1860 * Get vlan interface making sure it is running and on the right wiphy.
1804 */ 1861 */
1805static int get_vlan(struct genl_info *info, 1862static int get_vlan(struct genl_info *info,
1806 struct cfg80211_registered_device *rdev, 1863 struct cfg80211_registered_device *rdev,
@@ -1818,6 +1875,8 @@ static int get_vlan(struct genl_info *info,
1818 return -EINVAL; 1875 return -EINVAL;
1819 if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) 1876 if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
1820 return -EINVAL; 1877 return -EINVAL;
1878 if (!netif_running(*vlan))
1879 return -ENETDOWN;
1821 } 1880 }
1822 return 0; 1881 return 0;
1823} 1882}
@@ -2105,9 +2164,9 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
2105 if (pinfo->filled & MPATH_INFO_FRAME_QLEN) 2164 if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
2106 NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, 2165 NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
2107 pinfo->frame_qlen); 2166 pinfo->frame_qlen);
2108 if (pinfo->filled & MPATH_INFO_DSN) 2167 if (pinfo->filled & MPATH_INFO_SN)
2109 NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN, 2168 NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
2110 pinfo->dsn); 2169 pinfo->sn);
2111 if (pinfo->filled & MPATH_INFO_METRIC) 2170 if (pinfo->filled & MPATH_INFO_METRIC)
2112 NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, 2171 NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
2113 pinfo->metric); 2172 pinfo->metric);
@@ -2145,20 +2204,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
2145 int path_idx = cb->args[1]; 2204 int path_idx = cb->args[1];
2146 int err; 2205 int err;
2147 2206
2148 if (!ifidx) { 2207 if (!ifidx)
2149 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 2208 ifidx = nl80211_get_ifidx(cb);
2150 nl80211_fam.attrbuf, nl80211_fam.maxattr, 2209 if (ifidx < 0)
2151 nl80211_policy); 2210 return ifidx;
2152 if (err)
2153 return err;
2154
2155 if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
2156 return -EINVAL;
2157
2158 ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
2159 if (!ifidx)
2160 return -EINVAL;
2161 }
2162 2211
2163 rtnl_lock(); 2212 rtnl_lock();
2164 2213
@@ -2605,6 +2654,8 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
2605 cur_params.dot11MeshHWMPpreqMinInterval); 2654 cur_params.dot11MeshHWMPpreqMinInterval);
2606 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, 2655 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
2607 cur_params.dot11MeshHWMPnetDiameterTraversalTime); 2656 cur_params.dot11MeshHWMPnetDiameterTraversalTime);
2657 NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
2658 cur_params.dot11MeshHWMPRootMode);
2608 nla_nest_end(msg, pinfoattr); 2659 nla_nest_end(msg, pinfoattr);
2609 genlmsg_end(msg, hdr); 2660 genlmsg_end(msg, hdr);
2610 err = genlmsg_reply(msg, info); 2661 err = genlmsg_reply(msg, info);
@@ -2715,6 +2766,10 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
2715 dot11MeshHWMPnetDiameterTraversalTime, 2766 dot11MeshHWMPnetDiameterTraversalTime,
2716 mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, 2767 mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
2717 nla_get_u16); 2768 nla_get_u16);
2769 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
2770 dot11MeshHWMPRootMode, mask,
2771 NL80211_MESHCONF_HWMP_ROOTMODE,
2772 nla_get_u8);
2718 2773
2719 /* Apply changes */ 2774 /* Apply changes */
2720 err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask); 2775 err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask);
@@ -2988,7 +3043,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2988 goto out; 3043 goto out;
2989 } 3044 }
2990 3045
2991 request->n_channels = n_channels;
2992 if (n_ssids) 3046 if (n_ssids)
2993 request->ssids = (void *)&request->channels[n_channels]; 3047 request->ssids = (void *)&request->channels[n_channels];
2994 request->n_ssids = n_ssids; 3048 request->n_ssids = n_ssids;
@@ -2999,32 +3053,53 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2999 request->ie = (void *)(request->channels + n_channels); 3053 request->ie = (void *)(request->channels + n_channels);
3000 } 3054 }
3001 3055
3056 i = 0;
3002 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { 3057 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
3003 /* user specified, bail out if channel not found */ 3058 /* user specified, bail out if channel not found */
3004 request->n_channels = n_channels;
3005 i = 0;
3006 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { 3059 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) {
3007 request->channels[i] = ieee80211_get_channel(wiphy, nla_get_u32(attr)); 3060 struct ieee80211_channel *chan;
3008 if (!request->channels[i]) { 3061
3062 chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
3063
3064 if (!chan) {
3009 err = -EINVAL; 3065 err = -EINVAL;
3010 goto out_free; 3066 goto out_free;
3011 } 3067 }
3068
3069 /* ignore disabled channels */
3070 if (chan->flags & IEEE80211_CHAN_DISABLED)
3071 continue;
3072
3073 request->channels[i] = chan;
3012 i++; 3074 i++;
3013 } 3075 }
3014 } else { 3076 } else {
3015 /* all channels */ 3077 /* all channels */
3016 i = 0;
3017 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 3078 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
3018 int j; 3079 int j;
3019 if (!wiphy->bands[band]) 3080 if (!wiphy->bands[band])
3020 continue; 3081 continue;
3021 for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 3082 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
3022 request->channels[i] = &wiphy->bands[band]->channels[j]; 3083 struct ieee80211_channel *chan;
3084
3085 chan = &wiphy->bands[band]->channels[j];
3086
3087 if (chan->flags & IEEE80211_CHAN_DISABLED)
3088 continue;
3089
3090 request->channels[i] = chan;
3023 i++; 3091 i++;
3024 } 3092 }
3025 } 3093 }
3026 } 3094 }
3027 3095
3096 if (!i) {
3097 err = -EINVAL;
3098 goto out_free;
3099 }
3100
3101 request->n_channels = i;
3102
3028 i = 0; 3103 i = 0;
3029 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3104 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3030 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { 3105 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
@@ -3161,21 +3236,11 @@ static int nl80211_dump_scan(struct sk_buff *skb,
3161 int start = cb->args[1], idx = 0; 3236 int start = cb->args[1], idx = 0;
3162 int err; 3237 int err;
3163 3238
3164 if (!ifidx) { 3239 if (!ifidx)
3165 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 3240 ifidx = nl80211_get_ifidx(cb);
3166 nl80211_fam.attrbuf, nl80211_fam.maxattr, 3241 if (ifidx < 0)
3167 nl80211_policy); 3242 return ifidx;
3168 if (err) 3243 cb->args[0] = ifidx;
3169 return err;
3170
3171 if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
3172 return -EINVAL;
3173
3174 ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
3175 if (!ifidx)
3176 return -EINVAL;
3177 cb->args[0] = ifidx;
3178 }
3179 3244
3180 dev = dev_get_by_index(sock_net(skb->sk), ifidx); 3245 dev = dev_get_by_index(sock_net(skb->sk), ifidx);
3181 if (!dev) 3246 if (!dev)
@@ -3218,6 +3283,106 @@ static int nl80211_dump_scan(struct sk_buff *skb,
3218 return err; 3283 return err;
3219} 3284}
3220 3285
3286static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
3287 int flags, struct net_device *dev,
3288 struct survey_info *survey)
3289{
3290 void *hdr;
3291 struct nlattr *infoattr;
3292
3293 /* Survey without a channel doesn't make sense */
3294 if (!survey->channel)
3295 return -EINVAL;
3296
3297 hdr = nl80211hdr_put(msg, pid, seq, flags,
3298 NL80211_CMD_NEW_SURVEY_RESULTS);
3299 if (!hdr)
3300 return -ENOMEM;
3301
3302 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
3303
3304 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
3305 if (!infoattr)
3306 goto nla_put_failure;
3307
3308 NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
3309 survey->channel->center_freq);
3310 if (survey->filled & SURVEY_INFO_NOISE_DBM)
3311 NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
3312 survey->noise);
3313
3314 nla_nest_end(msg, infoattr);
3315
3316 return genlmsg_end(msg, hdr);
3317
3318 nla_put_failure:
3319 genlmsg_cancel(msg, hdr);
3320 return -EMSGSIZE;
3321}
3322
3323static int nl80211_dump_survey(struct sk_buff *skb,
3324 struct netlink_callback *cb)
3325{
3326 struct survey_info survey;
3327 struct cfg80211_registered_device *dev;
3328 struct net_device *netdev;
3329 int ifidx = cb->args[0];
3330 int survey_idx = cb->args[1];
3331 int res;
3332
3333 if (!ifidx)
3334 ifidx = nl80211_get_ifidx(cb);
3335 if (ifidx < 0)
3336 return ifidx;
3337 cb->args[0] = ifidx;
3338
3339 rtnl_lock();
3340
3341 netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
3342 if (!netdev) {
3343 res = -ENODEV;
3344 goto out_rtnl;
3345 }
3346
3347 dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
3348 if (IS_ERR(dev)) {
3349 res = PTR_ERR(dev);
3350 goto out_rtnl;
3351 }
3352
3353 if (!dev->ops->dump_survey) {
3354 res = -EOPNOTSUPP;
3355 goto out_err;
3356 }
3357
3358 while (1) {
3359 res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
3360 &survey);
3361 if (res == -ENOENT)
3362 break;
3363 if (res)
3364 goto out_err;
3365
3366 if (nl80211_send_survey(skb,
3367 NETLINK_CB(cb->skb).pid,
3368 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3369 netdev,
3370 &survey) < 0)
3371 goto out;
3372 survey_idx++;
3373 }
3374
3375 out:
3376 cb->args[1] = survey_idx;
3377 res = skb->len;
3378 out_err:
3379 cfg80211_unlock_rdev(dev);
3380 out_rtnl:
3381 rtnl_unlock();
3382
3383 return res;
3384}
3385
3221static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) 3386static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
3222{ 3387{
3223 return auth_type <= NL80211_AUTHTYPE_MAX; 3388 return auth_type <= NL80211_AUTHTYPE_MAX;
@@ -4295,6 +4460,11 @@ static struct genl_ops nl80211_ops[] = {
4295 .policy = nl80211_policy, 4460 .policy = nl80211_policy,
4296 .flags = GENL_ADMIN_PERM, 4461 .flags = GENL_ADMIN_PERM,
4297 }, 4462 },
4463 {
4464 .cmd = NL80211_CMD_GET_SURVEY,
4465 .policy = nl80211_policy,
4466 .dumpit = nl80211_dump_survey,
4467 },
4298}; 4468};
4299static struct genl_multicast_group nl80211_mlme_mcgrp = { 4469static struct genl_multicast_group nl80211_mlme_mcgrp = {
4300 .name = "mlme", 4470 .name = "mlme",
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f256dfffbf46..1f33017737fd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1008,7 +1008,7 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
1008 1008
1009 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1009 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
1010 request_wiphy && request_wiphy == wiphy && 1010 request_wiphy && request_wiphy == wiphy &&
1011 request_wiphy->strict_regulatory) { 1011 request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
1012 /* 1012 /*
1013 * This gaurantees the driver's requested regulatory domain 1013 * This gaurantees the driver's requested regulatory domain
1014 * will always be used as a base for further regulatory 1014 * will always be used as a base for further regulatory
@@ -1051,13 +1051,13 @@ static bool ignore_reg_update(struct wiphy *wiphy,
1051 if (!last_request) 1051 if (!last_request)
1052 return true; 1052 return true;
1053 if (initiator == NL80211_REGDOM_SET_BY_CORE && 1053 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
1054 wiphy->custom_regulatory) 1054 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
1055 return true; 1055 return true;
1056 /* 1056 /*
1057 * wiphy->regd will be set once the device has its own 1057 * wiphy->regd will be set once the device has its own
1058 * desired regulatory domain set 1058 * desired regulatory domain set
1059 */ 1059 */
1060 if (wiphy->strict_regulatory && !wiphy->regd && 1060 if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
1061 !is_world_regdom(last_request->alpha2)) 1061 !is_world_regdom(last_request->alpha2))
1062 return true; 1062 return true;
1063 return false; 1063 return false;
@@ -1093,7 +1093,7 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1093 1093
1094 chan->beacon_found = true; 1094 chan->beacon_found = true;
1095 1095
1096 if (wiphy->disable_beacon_hints) 1096 if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
1097 return; 1097 return;
1098 1098
1099 chan_before.center_freq = chan->center_freq; 1099 chan_before.center_freq = chan->center_freq;
@@ -1164,7 +1164,7 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
1164 return true; 1164 return true;
1165 if (last_request && 1165 if (last_request &&
1166 last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 1166 last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1167 wiphy->custom_regulatory) 1167 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
1168 return true; 1168 return true;
1169 return false; 1169 return false;
1170} 1170}
@@ -1591,7 +1591,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1591 1591
1592 r = __regulatory_hint(wiphy, reg_request); 1592 r = __regulatory_hint(wiphy, reg_request);
1593 /* This is required so that the orig_* parameters are saved */ 1593 /* This is required so that the orig_* parameters are saved */
1594 if (r == -EALREADY && wiphy && wiphy->strict_regulatory) 1594 if (r == -EALREADY && wiphy &&
1595 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
1595 wiphy_update_regulatory(wiphy, reg_request->initiator); 1596 wiphy_update_regulatory(wiphy, reg_request->initiator);
1596out: 1597out:
1597 mutex_unlock(&reg_mutex); 1598 mutex_unlock(&reg_mutex);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2e8c515f3c5c..96df34c3c6ee 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -217,7 +217,7 @@ static bool is_mesh(struct cfg80211_bss *a,
217 a->len_information_elements); 217 a->len_information_elements);
218 if (!ie) 218 if (!ie)
219 return false; 219 return false;
220 if (ie[1] != IEEE80211_MESH_CONFIG_LEN) 220 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
221 return false; 221 return false;
222 222
223 /* 223 /*
@@ -225,7 +225,8 @@ static bool is_mesh(struct cfg80211_bss *a,
225 * comparing since that may differ between stations taking 225 * comparing since that may differ between stations taking
226 * part in the same mesh. 226 * part in the same mesh.
227 */ 227 */
228 return memcmp(ie + 2, meshcfg, IEEE80211_MESH_CONFIG_LEN - 2) == 0; 228 return memcmp(ie + 2, meshcfg,
229 sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
229} 230}
230 231
231static int cmp_bss(struct cfg80211_bss *a, 232static int cmp_bss(struct cfg80211_bss *a,
@@ -399,7 +400,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
399 res->pub.information_elements, 400 res->pub.information_elements,
400 res->pub.len_information_elements); 401 res->pub.len_information_elements);
401 if (!meshid || !meshcfg || 402 if (!meshid || !meshcfg ||
402 meshcfg[1] != IEEE80211_MESH_CONFIG_LEN) { 403 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
403 /* bogus mesh */ 404 /* bogus mesh */
404 kref_put(&res->ref, bss_release); 405 kref_put(&res->ref, bss_release);
405 return NULL; 406 return NULL;
@@ -650,9 +651,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
650 i = 0; 651 i = 0;
651 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 652 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
652 int j; 653 int j;
654
653 if (!wiphy->bands[band]) 655 if (!wiphy->bands[band])
654 continue; 656 continue;
657
655 for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 658 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
659 /* ignore disabled channels */
660 if (wiphy->bands[band]->channels[j].flags &
661 IEEE80211_CHAN_DISABLED)
662 continue;
656 663
657 /* If we have a wireless request structure and the 664 /* If we have a wireless request structure and the
658 * wireless request specifies frequencies, then search 665 * wireless request specifies frequencies, then search
@@ -859,7 +866,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
859 break; 866 break;
860 case WLAN_EID_MESH_CONFIG: 867 case WLAN_EID_MESH_CONFIG:
861 ismesh = true; 868 ismesh = true;
862 if (ie[1] != IEEE80211_MESH_CONFIG_LEN) 869 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
863 break; 870 break;
864 buf = kmalloc(50, GFP_ATOMIC); 871 buf = kmalloc(50, GFP_ATOMIC);
865 if (!buf) 872 if (!buf)
@@ -867,35 +874,40 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
867 cfg = ie + 2; 874 cfg = ie + 2;
868 memset(&iwe, 0, sizeof(iwe)); 875 memset(&iwe, 0, sizeof(iwe));
869 iwe.cmd = IWEVCUSTOM; 876 iwe.cmd = IWEVCUSTOM;
870 sprintf(buf, "Mesh network (version %d)", cfg[0]); 877 sprintf(buf, "Mesh Network Path Selection Protocol ID: "
878 "0x%02X", cfg[0]);
879 iwe.u.data.length = strlen(buf);
880 current_ev = iwe_stream_add_point(info, current_ev,
881 end_buf,
882 &iwe, buf);
883 sprintf(buf, "Path Selection Metric ID: 0x%02X",
884 cfg[1]);
885 iwe.u.data.length = strlen(buf);
886 current_ev = iwe_stream_add_point(info, current_ev,
887 end_buf,
888 &iwe, buf);
889 sprintf(buf, "Congestion Control Mode ID: 0x%02X",
890 cfg[2]);
871 iwe.u.data.length = strlen(buf); 891 iwe.u.data.length = strlen(buf);
872 current_ev = iwe_stream_add_point(info, current_ev, 892 current_ev = iwe_stream_add_point(info, current_ev,
873 end_buf, 893 end_buf,
874 &iwe, buf); 894 &iwe, buf);
875 sprintf(buf, "Path Selection Protocol ID: " 895 sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]);
876 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
877 cfg[4]);
878 iwe.u.data.length = strlen(buf); 896 iwe.u.data.length = strlen(buf);
879 current_ev = iwe_stream_add_point(info, current_ev, 897 current_ev = iwe_stream_add_point(info, current_ev,
880 end_buf, 898 end_buf,
881 &iwe, buf); 899 &iwe, buf);
882 sprintf(buf, "Path Selection Metric ID: " 900 sprintf(buf, "Authentication ID: 0x%02X", cfg[4]);
883 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
884 cfg[8]);
885 iwe.u.data.length = strlen(buf); 901 iwe.u.data.length = strlen(buf);
886 current_ev = iwe_stream_add_point(info, current_ev, 902 current_ev = iwe_stream_add_point(info, current_ev,
887 end_buf, 903 end_buf,
888 &iwe, buf); 904 &iwe, buf);
889 sprintf(buf, "Congestion Control Mode ID: " 905 sprintf(buf, "Formation Info: 0x%02X", cfg[5]);
890 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
891 cfg[11], cfg[12]);
892 iwe.u.data.length = strlen(buf); 906 iwe.u.data.length = strlen(buf);
893 current_ev = iwe_stream_add_point(info, current_ev, 907 current_ev = iwe_stream_add_point(info, current_ev,
894 end_buf, 908 end_buf,
895 &iwe, buf); 909 &iwe, buf);
896 sprintf(buf, "Channel Precedence: " 910 sprintf(buf, "Capabilities: 0x%02X", cfg[6]);
897 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
898 cfg[15], cfg[16]);
899 iwe.u.data.length = strlen(buf); 911 iwe.u.data.length = strlen(buf);
900 current_ev = iwe_stream_add_point(info, current_ev, 912 current_ev = iwe_stream_add_point(info, current_ev,
901 end_buf, 913 end_buf,
@@ -925,8 +937,8 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
925 ie += ie[1] + 2; 937 ie += ie[1] + 2;
926 } 938 }
927 939
928 if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) 940 if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) ||
929 || ismesh) { 941 ismesh) {
930 memset(&iwe, 0, sizeof(iwe)); 942 memset(&iwe, 0, sizeof(iwe));
931 iwe.cmd = SIOCGIWMODE; 943 iwe.cmd = SIOCGIWMODE;
932 if (ismesh) 944 if (ismesh)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d3624152f7f7..0115d07d2c1a 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -26,6 +26,7 @@ struct cfg80211_conn {
26 CFG80211_CONN_AUTHENTICATING, 26 CFG80211_CONN_AUTHENTICATING,
27 CFG80211_CONN_ASSOCIATE_NEXT, 27 CFG80211_CONN_ASSOCIATE_NEXT,
28 CFG80211_CONN_ASSOCIATING, 28 CFG80211_CONN_ASSOCIATING,
29 CFG80211_CONN_DEAUTH_ASSOC_FAIL,
29 } state; 30 } state;
30 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 31 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
31 u8 *ie; 32 u8 *ie;
@@ -148,6 +149,12 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
148 NULL, 0, 149 NULL, 0,
149 WLAN_REASON_DEAUTH_LEAVING); 150 WLAN_REASON_DEAUTH_LEAVING);
150 return err; 151 return err;
152 case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
153 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
154 NULL, 0,
155 WLAN_REASON_DEAUTH_LEAVING);
156 /* return an error so that we call __cfg80211_connect_result() */
157 return -EINVAL;
151 default: 158 default:
152 return 0; 159 return 0;
153 } 160 }
@@ -158,6 +165,7 @@ void cfg80211_conn_work(struct work_struct *work)
158 struct cfg80211_registered_device *rdev = 165 struct cfg80211_registered_device *rdev =
159 container_of(work, struct cfg80211_registered_device, conn_work); 166 container_of(work, struct cfg80211_registered_device, conn_work);
160 struct wireless_dev *wdev; 167 struct wireless_dev *wdev;
168 u8 bssid_buf[ETH_ALEN], *bssid = NULL;
161 169
162 rtnl_lock(); 170 rtnl_lock();
163 cfg80211_lock_rdev(rdev); 171 cfg80211_lock_rdev(rdev);
@@ -173,10 +181,13 @@ void cfg80211_conn_work(struct work_struct *work)
173 wdev_unlock(wdev); 181 wdev_unlock(wdev);
174 continue; 182 continue;
175 } 183 }
184 if (wdev->conn->params.bssid) {
185 memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
186 bssid = bssid_buf;
187 }
176 if (cfg80211_conn_do_work(wdev)) 188 if (cfg80211_conn_do_work(wdev))
177 __cfg80211_connect_result( 189 __cfg80211_connect_result(
178 wdev->netdev, 190 wdev->netdev, bssid,
179 wdev->conn->params.bssid,
180 NULL, 0, NULL, 0, 191 NULL, 0, NULL, 0,
181 WLAN_STATUS_UNSPECIFIED_FAILURE, 192 WLAN_STATUS_UNSPECIFIED_FAILURE,
182 false, NULL); 193 false, NULL);
@@ -337,6 +348,15 @@ bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev)
337 return true; 348 return true;
338} 349}
339 350
351void cfg80211_sme_failed_assoc(struct wireless_dev *wdev)
352{
353 struct wiphy *wiphy = wdev->wiphy;
354 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
355
356 wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL;
357 schedule_work(&rdev->conn_work);
358}
359
340void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 360void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
341 const u8 *req_ie, size_t req_ie_len, 361 const u8 *req_ie, size_t req_ie_len,
342 const u8 *resp_ie, size_t resp_ie_len, 362 const u8 *resp_ie, size_t resp_ie_len,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3fc2df86278f..59361fdcb5d0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -320,7 +320,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
320 break; 320 break;
321 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 321 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
322 if (unlikely(iftype != NL80211_IFTYPE_WDS && 322 if (unlikely(iftype != NL80211_IFTYPE_WDS &&
323 iftype != NL80211_IFTYPE_MESH_POINT)) 323 iftype != NL80211_IFTYPE_MESH_POINT &&
324 iftype != NL80211_IFTYPE_AP_VLAN &&
325 iftype != NL80211_IFTYPE_STATION))
324 return -1; 326 return -1;
325 if (iftype == NL80211_IFTYPE_MESH_POINT) { 327 if (iftype == NL80211_IFTYPE_MESH_POINT) {
326 struct ieee80211s_hdr *meshdr = 328 struct ieee80211s_hdr *meshdr =
@@ -656,7 +658,14 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
656 !(rdev->wiphy.interface_modes & (1 << ntype))) 658 !(rdev->wiphy.interface_modes & (1 << ntype)))
657 return -EOPNOTSUPP; 659 return -EOPNOTSUPP;
658 660
661 /* if it's part of a bridge, reject changing type to station/ibss */
662 if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
663 ntype == NL80211_IFTYPE_STATION))
664 return -EBUSY;
665
659 if (ntype != otype) { 666 if (ntype != otype) {
667 dev->ieee80211_ptr->use_4addr = false;
668
660 switch (otype) { 669 switch (otype) {
661 case NL80211_IFTYPE_ADHOC: 670 case NL80211_IFTYPE_ADHOC:
662 cfg80211_leave_ibss(rdev, dev, false); 671 cfg80211_leave_ibss(rdev, dev, false);
@@ -680,5 +689,34 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
680 689
681 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); 690 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
682 691
692 if (!err && params && params->use_4addr != -1)
693 dev->ieee80211_ptr->use_4addr = params->use_4addr;
694
695 if (!err) {
696 dev->priv_flags &= ~IFF_DONT_BRIDGE;
697 switch (ntype) {
698 case NL80211_IFTYPE_STATION:
699 if (dev->ieee80211_ptr->use_4addr)
700 break;
701 /* fall through */
702 case NL80211_IFTYPE_ADHOC:
703 dev->priv_flags |= IFF_DONT_BRIDGE;
704 break;
705 case NL80211_IFTYPE_AP:
706 case NL80211_IFTYPE_AP_VLAN:
707 case NL80211_IFTYPE_WDS:
708 case NL80211_IFTYPE_MESH_POINT:
709 /* bridging OK */
710 break;
711 case NL80211_IFTYPE_MONITOR:
712 /* monitor can't bridge anyway */
713 break;
714 case NL80211_IFTYPE_UNSPECIFIED:
715 case __NL80211_IFTYPE_AFTER_LAST:
716 /* not happening */
717 break;
718 }
719 }
720
683 return err; 721 return err;
684} 722}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 561a45cf2a6a..29091ac9f989 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -437,6 +437,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
437{ 437{
438 struct wireless_dev *wdev = dev->ieee80211_ptr; 438 struct wireless_dev *wdev = dev->ieee80211_ptr;
439 int err, i; 439 int err, i;
440 bool rejoin = false;
440 441
441 if (!wdev->wext.keys) { 442 if (!wdev->wext.keys) {
442 wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys), 443 wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
@@ -466,8 +467,24 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
466 467
467 if (remove) { 468 if (remove) {
468 err = 0; 469 err = 0;
469 if (wdev->current_bss) 470 if (wdev->current_bss) {
471 /*
472 * If removing the current TX key, we will need to
473 * join a new IBSS without the privacy bit clear.
474 */
475 if (idx == wdev->wext.default_key &&
476 wdev->iftype == NL80211_IFTYPE_ADHOC) {
477 __cfg80211_leave_ibss(rdev, wdev->netdev, true);
478 rejoin = true;
479 }
470 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); 480 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
481 }
482 /*
483 * Applications using wireless extensions expect to be
484 * able to delete keys that don't exist, so allow that.
485 */
486 if (err == -ENOENT)
487 err = 0;
471 if (!err) { 488 if (!err) {
472 if (!addr) { 489 if (!addr) {
473 wdev->wext.keys->params[idx].key_len = 0; 490 wdev->wext.keys->params[idx].key_len = 0;
@@ -478,12 +495,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
478 else if (idx == wdev->wext.default_mgmt_key) 495 else if (idx == wdev->wext.default_mgmt_key)
479 wdev->wext.default_mgmt_key = -1; 496 wdev->wext.default_mgmt_key = -1;
480 } 497 }
481 /* 498
482 * Applications using wireless extensions expect to be 499 if (!err && rejoin)
483 * able to delete keys that don't exist, so allow that. 500 err = cfg80211_ibss_wext_join(rdev, wdev);
484 */
485 if (err == -ENOENT)
486 return 0;
487 501
488 return err; 502 return err;
489 } 503 }
@@ -511,11 +525,25 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
511 if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 || 525 if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
512 params->cipher == WLAN_CIPHER_SUITE_WEP104) && 526 params->cipher == WLAN_CIPHER_SUITE_WEP104) &&
513 (tx_key || (!addr && wdev->wext.default_key == -1))) { 527 (tx_key || (!addr && wdev->wext.default_key == -1))) {
514 if (wdev->current_bss) 528 if (wdev->current_bss) {
529 /*
530 * If we are getting a new TX key from not having
531 * had one before we need to join a new IBSS with
532 * the privacy bit set.
533 */
534 if (wdev->iftype == NL80211_IFTYPE_ADHOC &&
535 wdev->wext.default_key == -1) {
536 __cfg80211_leave_ibss(rdev, wdev->netdev, true);
537 rejoin = true;
538 }
515 err = rdev->ops->set_default_key(&rdev->wiphy, 539 err = rdev->ops->set_default_key(&rdev->wiphy,
516 dev, idx); 540 dev, idx);
517 if (!err) 541 }
542 if (!err) {
518 wdev->wext.default_key = idx; 543 wdev->wext.default_key = idx;
544 if (rejoin)
545 err = cfg80211_ibss_wext_join(rdev, wdev);
546 }
519 return err; 547 return err;
520 } 548 }
521 549
@@ -539,10 +567,13 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
539{ 567{
540 int err; 568 int err;
541 569
570 /* devlist mutex needed for possible IBSS re-join */
571 mutex_lock(&rdev->devlist_mtx);
542 wdev_lock(dev->ieee80211_ptr); 572 wdev_lock(dev->ieee80211_ptr);
543 err = __cfg80211_set_encryption(rdev, dev, addr, remove, 573 err = __cfg80211_set_encryption(rdev, dev, addr, remove,
544 tx_key, idx, params); 574 tx_key, idx, params);
545 wdev_unlock(dev->ieee80211_ptr); 575 wdev_unlock(dev->ieee80211_ptr);
576 mutex_unlock(&rdev->devlist_mtx);
546 577
547 return err; 578 return err;
548} 579}
@@ -904,8 +935,6 @@ static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
904 935
905static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) 936static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
906{ 937{
907 wdev->wext.connect.crypto.wpa_versions = 0;
908
909 if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | 938 if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA |
910 IW_AUTH_WPA_VERSION_WPA2| 939 IW_AUTH_WPA_VERSION_WPA2|
911 IW_AUTH_WPA_VERSION_DISABLED)) 940 IW_AUTH_WPA_VERSION_DISABLED))
@@ -933,8 +962,6 @@ static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
933 962
934static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) 963static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
935{ 964{
936 wdev->wext.connect.crypto.cipher_group = 0;
937
938 if (cipher & IW_AUTH_CIPHER_WEP40) 965 if (cipher & IW_AUTH_CIPHER_WEP40)
939 wdev->wext.connect.crypto.cipher_group = 966 wdev->wext.connect.crypto.cipher_group =
940 WLAN_CIPHER_SUITE_WEP40; 967 WLAN_CIPHER_SUITE_WEP40;
@@ -950,6 +977,8 @@ static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
950 else if (cipher & IW_AUTH_CIPHER_AES_CMAC) 977 else if (cipher & IW_AUTH_CIPHER_AES_CMAC)
951 wdev->wext.connect.crypto.cipher_group = 978 wdev->wext.connect.crypto.cipher_group =
952 WLAN_CIPHER_SUITE_AES_CMAC; 979 WLAN_CIPHER_SUITE_AES_CMAC;
980 else if (cipher & IW_AUTH_CIPHER_NONE)
981 wdev->wext.connect.crypto.cipher_group = 0;
953 else 982 else
954 return -EINVAL; 983 return -EINVAL;
955 984
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index a4e5ddc8d4f5..58dfb954974a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -911,8 +911,9 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
911 */ 911 */
912static int wext_permission_check(unsigned int cmd) 912static int wext_permission_check(unsigned int cmd)
913{ 913{
914 if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) 914 if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE ||
915 && !capable(CAP_NET_ADMIN)) 915 cmd == SIOCGIWENCODEEXT) &&
916 !capable(CAP_NET_ADMIN))
916 return -EPERM; 917 return -EPERM;
917 918
918 return 0; 919 return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e19d811788a5..e3219e4cd044 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -415,6 +415,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
415 struct sock *sk = sock->sk; 415 struct sock *sk = sock->sk;
416 int rc = -ENOPROTOOPT; 416 int rc = -ENOPROTOOPT;
417 417
418 lock_kernel();
418 if (level != SOL_X25 || optname != X25_QBITINCL) 419 if (level != SOL_X25 || optname != X25_QBITINCL)
419 goto out; 420 goto out;
420 421
@@ -429,6 +430,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
429 x25_sk(sk)->qbitincl = !!opt; 430 x25_sk(sk)->qbitincl = !!opt;
430 rc = 0; 431 rc = 0;
431out: 432out:
433 unlock_kernel();
432 return rc; 434 return rc;
433} 435}
434 436
@@ -438,6 +440,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
438 struct sock *sk = sock->sk; 440 struct sock *sk = sock->sk;
439 int val, len, rc = -ENOPROTOOPT; 441 int val, len, rc = -ENOPROTOOPT;
440 442
443 lock_kernel();
441 if (level != SOL_X25 || optname != X25_QBITINCL) 444 if (level != SOL_X25 || optname != X25_QBITINCL)
442 goto out; 445 goto out;
443 446
@@ -458,6 +461,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
458 val = x25_sk(sk)->qbitincl; 461 val = x25_sk(sk)->qbitincl;
459 rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; 462 rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
460out: 463out:
464 unlock_kernel();
461 return rc; 465 return rc;
462} 466}
463 467
@@ -466,12 +470,14 @@ static int x25_listen(struct socket *sock, int backlog)
466 struct sock *sk = sock->sk; 470 struct sock *sk = sock->sk;
467 int rc = -EOPNOTSUPP; 471 int rc = -EOPNOTSUPP;
468 472
473 lock_kernel();
469 if (sk->sk_state != TCP_LISTEN) { 474 if (sk->sk_state != TCP_LISTEN) {
470 memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); 475 memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
471 sk->sk_max_ack_backlog = backlog; 476 sk->sk_max_ack_backlog = backlog;
472 sk->sk_state = TCP_LISTEN; 477 sk->sk_state = TCP_LISTEN;
473 rc = 0; 478 rc = 0;
474 } 479 }
480 unlock_kernel();
475 481
476 return rc; 482 return rc;
477} 483}
@@ -501,13 +507,14 @@ out:
501 return sk; 507 return sk;
502} 508}
503 509
504static int x25_create(struct net *net, struct socket *sock, int protocol) 510static int x25_create(struct net *net, struct socket *sock, int protocol,
511 int kern)
505{ 512{
506 struct sock *sk; 513 struct sock *sk;
507 struct x25_sock *x25; 514 struct x25_sock *x25;
508 int rc = -ESOCKTNOSUPPORT; 515 int rc = -ESOCKTNOSUPPORT;
509 516
510 if (net != &init_net) 517 if (!net_eq(net, &init_net))
511 return -EAFNOSUPPORT; 518 return -EAFNOSUPPORT;
512 519
513 if (sock->type != SOCK_SEQPACKET || protocol) 520 if (sock->type != SOCK_SEQPACKET || protocol)
@@ -597,6 +604,7 @@ static int x25_release(struct socket *sock)
597 struct sock *sk = sock->sk; 604 struct sock *sk = sock->sk;
598 struct x25_sock *x25; 605 struct x25_sock *x25;
599 606
607 lock_kernel();
600 if (!sk) 608 if (!sk)
601 goto out; 609 goto out;
602 610
@@ -627,6 +635,7 @@ static int x25_release(struct socket *sock)
627 635
628 sock_orphan(sk); 636 sock_orphan(sk);
629out: 637out:
638 unlock_kernel();
630 return 0; 639 return 0;
631} 640}
632 641
@@ -634,18 +643,23 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
634{ 643{
635 struct sock *sk = sock->sk; 644 struct sock *sk = sock->sk;
636 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 645 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
646 int rc = 0;
637 647
648 lock_kernel();
638 if (!sock_flag(sk, SOCK_ZAPPED) || 649 if (!sock_flag(sk, SOCK_ZAPPED) ||
639 addr_len != sizeof(struct sockaddr_x25) || 650 addr_len != sizeof(struct sockaddr_x25) ||
640 addr->sx25_family != AF_X25) 651 addr->sx25_family != AF_X25) {
641 return -EINVAL; 652 rc = -EINVAL;
653 goto out;
654 }
642 655
643 x25_sk(sk)->source_addr = addr->sx25_addr; 656 x25_sk(sk)->source_addr = addr->sx25_addr;
644 x25_insert_socket(sk); 657 x25_insert_socket(sk);
645 sock_reset_flag(sk, SOCK_ZAPPED); 658 sock_reset_flag(sk, SOCK_ZAPPED);
646 SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); 659 SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
647 660out:
648 return 0; 661 unlock_kernel();
662 return rc;
649} 663}
650 664
651static int x25_wait_for_connection_establishment(struct sock *sk) 665static int x25_wait_for_connection_establishment(struct sock *sk)
@@ -686,6 +700,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
686 struct x25_route *rt; 700 struct x25_route *rt;
687 int rc = 0; 701 int rc = 0;
688 702
703 lock_kernel();
689 lock_sock(sk); 704 lock_sock(sk);
690 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { 705 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
691 sock->state = SS_CONNECTED; 706 sock->state = SS_CONNECTED;
@@ -763,6 +778,7 @@ out_put_route:
763 x25_route_put(rt); 778 x25_route_put(rt);
764out: 779out:
765 release_sock(sk); 780 release_sock(sk);
781 unlock_kernel();
766 return rc; 782 return rc;
767} 783}
768 784
@@ -802,6 +818,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
802 struct sk_buff *skb; 818 struct sk_buff *skb;
803 int rc = -EINVAL; 819 int rc = -EINVAL;
804 820
821 lock_kernel();
805 if (!sk || sk->sk_state != TCP_LISTEN) 822 if (!sk || sk->sk_state != TCP_LISTEN)
806 goto out; 823 goto out;
807 824
@@ -829,6 +846,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
829out2: 846out2:
830 release_sock(sk); 847 release_sock(sk);
831out: 848out:
849 unlock_kernel();
832 return rc; 850 return rc;
833} 851}
834 852
@@ -838,10 +856,14 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
838 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; 856 struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
839 struct sock *sk = sock->sk; 857 struct sock *sk = sock->sk;
840 struct x25_sock *x25 = x25_sk(sk); 858 struct x25_sock *x25 = x25_sk(sk);
859 int rc = 0;
841 860
861 lock_kernel();
842 if (peer) { 862 if (peer) {
843 if (sk->sk_state != TCP_ESTABLISHED) 863 if (sk->sk_state != TCP_ESTABLISHED) {
844 return -ENOTCONN; 864 rc = -ENOTCONN;
865 goto out;
866 }
845 sx25->sx25_addr = x25->dest_addr; 867 sx25->sx25_addr = x25->dest_addr;
846 } else 868 } else
847 sx25->sx25_addr = x25->source_addr; 869 sx25->sx25_addr = x25->source_addr;
@@ -849,7 +871,21 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
849 sx25->sx25_family = AF_X25; 871 sx25->sx25_family = AF_X25;
850 *uaddr_len = sizeof(*sx25); 872 *uaddr_len = sizeof(*sx25);
851 873
852 return 0; 874out:
875 unlock_kernel();
876 return rc;
877}
878
879static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
880 poll_table *wait)
881{
882 int rc;
883
884 lock_kernel();
885 rc = datagram_poll(file, sock, wait);
886 unlock_kernel();
887
888 return rc;
853} 889}
854 890
855int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, 891int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
@@ -1002,6 +1038,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1002 size_t size; 1038 size_t size;
1003 int qbit = 0, rc = -EINVAL; 1039 int qbit = 0, rc = -EINVAL;
1004 1040
1041 lock_kernel();
1005 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) 1042 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
1006 goto out; 1043 goto out;
1007 1044
@@ -1166,6 +1203,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1166 release_sock(sk); 1203 release_sock(sk);
1167 rc = len; 1204 rc = len;
1168out: 1205out:
1206 unlock_kernel();
1169 return rc; 1207 return rc;
1170out_kfree_skb: 1208out_kfree_skb:
1171 kfree_skb(skb); 1209 kfree_skb(skb);
@@ -1186,6 +1224,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1186 unsigned char *asmptr; 1224 unsigned char *asmptr;
1187 int rc = -ENOTCONN; 1225 int rc = -ENOTCONN;
1188 1226
1227 lock_kernel();
1189 /* 1228 /*
1190 * This works for seqpacket too. The receiver has ordered the queue for 1229 * This works for seqpacket too. The receiver has ordered the queue for
1191 * us! We do one quick check first though 1230 * us! We do one quick check first though
@@ -1259,6 +1298,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1259out_free_dgram: 1298out_free_dgram:
1260 skb_free_datagram(sk, skb); 1299 skb_free_datagram(sk, skb);
1261out: 1300out:
1301 unlock_kernel();
1262 return rc; 1302 return rc;
1263} 1303}
1264 1304
@@ -1270,6 +1310,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1270 void __user *argp = (void __user *)arg; 1310 void __user *argp = (void __user *)arg;
1271 int rc; 1311 int rc;
1272 1312
1313 lock_kernel();
1273 switch (cmd) { 1314 switch (cmd) {
1274 case TIOCOUTQ: { 1315 case TIOCOUTQ: {
1275 int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 1316 int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
@@ -1430,6 +1471,17 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1430 break; 1471 break;
1431 } 1472 }
1432 1473
1474 case SIOCX25SCAUSEDIAG: {
1475 struct x25_causediag causediag;
1476 rc = -EFAULT;
1477 if (copy_from_user(&causediag, argp, sizeof(causediag)))
1478 break;
1479 x25->causediag = causediag;
1480 rc = 0;
1481 break;
1482
1483 }
1484
1433 case SIOCX25SCUDMATCHLEN: { 1485 case SIOCX25SCUDMATCHLEN: {
1434 struct x25_subaddr sub_addr; 1486 struct x25_subaddr sub_addr;
1435 rc = -EINVAL; 1487 rc = -EINVAL;
@@ -1472,6 +1524,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1472 rc = -ENOIOCTLCMD; 1524 rc = -ENOIOCTLCMD;
1473 break; 1525 break;
1474 } 1526 }
1527 unlock_kernel();
1475 1528
1476 return rc; 1529 return rc;
1477} 1530}
@@ -1542,15 +1595,19 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1542 break; 1595 break;
1543 case SIOCGSTAMP: 1596 case SIOCGSTAMP:
1544 rc = -EINVAL; 1597 rc = -EINVAL;
1598 lock_kernel();
1545 if (sk) 1599 if (sk)
1546 rc = compat_sock_get_timestamp(sk, 1600 rc = compat_sock_get_timestamp(sk,
1547 (struct timeval __user*)argp); 1601 (struct timeval __user*)argp);
1602 unlock_kernel();
1548 break; 1603 break;
1549 case SIOCGSTAMPNS: 1604 case SIOCGSTAMPNS:
1550 rc = -EINVAL; 1605 rc = -EINVAL;
1606 lock_kernel();
1551 if (sk) 1607 if (sk)
1552 rc = compat_sock_get_timestampns(sk, 1608 rc = compat_sock_get_timestampns(sk,
1553 (struct timespec __user*)argp); 1609 (struct timespec __user*)argp);
1610 unlock_kernel();
1554 break; 1611 break;
1555 case SIOCGIFADDR: 1612 case SIOCGIFADDR:
1556 case SIOCSIFADDR: 1613 case SIOCSIFADDR:
@@ -1569,16 +1626,22 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1569 rc = -EPERM; 1626 rc = -EPERM;
1570 if (!capable(CAP_NET_ADMIN)) 1627 if (!capable(CAP_NET_ADMIN))
1571 break; 1628 break;
1629 lock_kernel();
1572 rc = x25_route_ioctl(cmd, argp); 1630 rc = x25_route_ioctl(cmd, argp);
1631 unlock_kernel();
1573 break; 1632 break;
1574 case SIOCX25GSUBSCRIP: 1633 case SIOCX25GSUBSCRIP:
1634 lock_kernel();
1575 rc = compat_x25_subscr_ioctl(cmd, argp); 1635 rc = compat_x25_subscr_ioctl(cmd, argp);
1636 unlock_kernel();
1576 break; 1637 break;
1577 case SIOCX25SSUBSCRIP: 1638 case SIOCX25SSUBSCRIP:
1578 rc = -EPERM; 1639 rc = -EPERM;
1579 if (!capable(CAP_NET_ADMIN)) 1640 if (!capable(CAP_NET_ADMIN))
1580 break; 1641 break;
1642 lock_kernel();
1581 rc = compat_x25_subscr_ioctl(cmd, argp); 1643 rc = compat_x25_subscr_ioctl(cmd, argp);
1644 unlock_kernel();
1582 break; 1645 break;
1583 case SIOCX25GFACILITIES: 1646 case SIOCX25GFACILITIES:
1584 case SIOCX25SFACILITIES: 1647 case SIOCX25SFACILITIES:
@@ -1587,6 +1650,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1587 case SIOCX25GCALLUSERDATA: 1650 case SIOCX25GCALLUSERDATA:
1588 case SIOCX25SCALLUSERDATA: 1651 case SIOCX25SCALLUSERDATA:
1589 case SIOCX25GCAUSEDIAG: 1652 case SIOCX25GCAUSEDIAG:
1653 case SIOCX25SCAUSEDIAG:
1590 case SIOCX25SCUDMATCHLEN: 1654 case SIOCX25SCUDMATCHLEN:
1591 case SIOCX25CALLACCPTAPPRV: 1655 case SIOCX25CALLACCPTAPPRV:
1592 case SIOCX25SENDCALLACCPT: 1656 case SIOCX25SENDCALLACCPT:
@@ -1600,7 +1664,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1600} 1664}
1601#endif 1665#endif
1602 1666
1603static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { 1667static const struct proto_ops x25_proto_ops = {
1604 .family = AF_X25, 1668 .family = AF_X25,
1605 .owner = THIS_MODULE, 1669 .owner = THIS_MODULE,
1606 .release = x25_release, 1670 .release = x25_release,
@@ -1609,7 +1673,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
1609 .socketpair = sock_no_socketpair, 1673 .socketpair = sock_no_socketpair,
1610 .accept = x25_accept, 1674 .accept = x25_accept,
1611 .getname = x25_getname, 1675 .getname = x25_getname,
1612 .poll = datagram_poll, 1676 .poll = x25_datagram_poll,
1613 .ioctl = x25_ioctl, 1677 .ioctl = x25_ioctl,
1614#ifdef CONFIG_COMPAT 1678#ifdef CONFIG_COMPAT
1615 .compat_ioctl = compat_x25_ioctl, 1679 .compat_ioctl = compat_x25_ioctl,
@@ -1624,8 +1688,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
1624 .sendpage = sock_no_sendpage, 1688 .sendpage = sock_no_sendpage,
1625}; 1689};
1626 1690
1627SOCKOPS_WRAP(x25_proto, AF_X25);
1628
1629static struct packet_type x25_packet_type __read_mostly = { 1691static struct packet_type x25_packet_type __read_mostly = {
1630 .type = cpu_to_be16(ETH_P_X25), 1692 .type = cpu_to_be16(ETH_P_X25),
1631 .func = x25_lapb_receive_frame, 1693 .func = x25_lapb_receive_frame,
@@ -1659,20 +1721,31 @@ static int __init x25_init(void)
1659 if (rc != 0) 1721 if (rc != 0)
1660 goto out; 1722 goto out;
1661 1723
1662 sock_register(&x25_family_ops); 1724 rc = sock_register(&x25_family_ops);
1725 if (rc != 0)
1726 goto out_proto;
1663 1727
1664 dev_add_pack(&x25_packet_type); 1728 dev_add_pack(&x25_packet_type);
1665 1729
1666 register_netdevice_notifier(&x25_dev_notifier); 1730 rc = register_netdevice_notifier(&x25_dev_notifier);
1731 if (rc != 0)
1732 goto out_sock;
1667 1733
1668 printk(KERN_INFO "X.25 for Linux Version 0.2\n"); 1734 printk(KERN_INFO "X.25 for Linux Version 0.2\n");
1669 1735
1670#ifdef CONFIG_SYSCTL
1671 x25_register_sysctl(); 1736 x25_register_sysctl();
1672#endif 1737 rc = x25_proc_init();
1673 x25_proc_init(); 1738 if (rc != 0)
1739 goto out_dev;
1674out: 1740out:
1675 return rc; 1741 return rc;
1742out_dev:
1743 unregister_netdevice_notifier(&x25_dev_notifier);
1744out_sock:
1745 sock_unregister(AF_X25);
1746out_proto:
1747 proto_unregister(&x25_proto);
1748 goto out;
1676} 1749}
1677module_init(x25_init); 1750module_init(x25_init);
1678 1751
@@ -1682,9 +1755,7 @@ static void __exit x25_exit(void)
1682 x25_link_free(); 1755 x25_link_free();
1683 x25_route_free(); 1756 x25_route_free();
1684 1757
1685#ifdef CONFIG_SYSCTL
1686 x25_unregister_sysctl(); 1758 x25_unregister_sysctl();
1687#endif
1688 1759
1689 unregister_netdevice_notifier(&x25_dev_notifier); 1760 unregister_netdevice_notifier(&x25_dev_notifier);
1690 1761
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 66961ea28c91..b95fae9ab393 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -136,8 +136,10 @@ struct net_device *x25_dev_get(char *devname)
136#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) 136#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
137 && dev->type != ARPHRD_ETHER 137 && dev->type != ARPHRD_ETHER
138#endif 138#endif
139 ))) 139 ))){
140 dev_put(dev); 140 dev_put(dev);
141 dev = NULL;
142 }
141 143
142 return dev; 144 return dev;
143} 145}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 511a5986af3e..352b32d216fc 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -225,6 +225,12 @@ void x25_write_internal(struct sock *sk, int frametype)
225 break; 225 break;
226 226
227 case X25_CLEAR_REQUEST: 227 case X25_CLEAR_REQUEST:
228 dptr = skb_put(skb, 3);
229 *dptr++ = frametype;
230 *dptr++ = x25->causediag.cause;
231 *dptr++ = x25->causediag.diagnostic;
232 break;
233
228 case X25_RESET_REQUEST: 234 case X25_RESET_REQUEST:
229 dptr = skb_put(skb, 3); 235 dptr = skb_put(skb, 3);
230 *dptr++ = frametype; 236 *dptr++ = frametype;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index b39341072aa6..743c0134a6a9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -200,6 +200,40 @@ static struct xfrm_algo_desc aalg_list[] = {
200 } 200 }
201}, 201},
202{ 202{
203 .name = "hmac(sha384)",
204
205 .uinfo = {
206 .auth = {
207 .icv_truncbits = 192,
208 .icv_fullbits = 384,
209 }
210 },
211
212 .desc = {
213 .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
214 .sadb_alg_ivlen = 0,
215 .sadb_alg_minbits = 384,
216 .sadb_alg_maxbits = 384
217 }
218},
219{
220 .name = "hmac(sha512)",
221
222 .uinfo = {
223 .auth = {
224 .icv_truncbits = 256,
225 .icv_fullbits = 512,
226 }
227 },
228
229 .desc = {
230 .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
231 .sadb_alg_ivlen = 0,
232 .sadb_alg_minbits = 512,
233 .sadb_alg_maxbits = 512
234 }
235},
236{
203 .name = "hmac(rmd160)", 237 .name = "hmac(rmd160)",
204 .compat = "rmd160", 238 .compat = "rmd160",
205 239
@@ -365,6 +399,7 @@ static struct xfrm_algo_desc ealg_list[] = {
365}, 399},
366{ 400{
367 .name = "cbc(camellia)", 401 .name = "cbc(camellia)",
402 .compat = "camellia",
368 403
369 .uinfo = { 404 .uinfo = {
370 .encr = { 405 .encr = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f2f7c638083e..d847f1a52b44 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -21,6 +21,9 @@
21#include <linux/cache.h> 21#include <linux/cache.h>
22#include <linux/audit.h> 22#include <linux/audit.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/ktime.h>
25#include <linux/interrupt.h>
26#include <linux/kernel.h>
24 27
25#include "xfrm_hash.h" 28#include "xfrm_hash.h"
26 29
@@ -352,7 +355,7 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
352 355
353static void xfrm_state_gc_destroy(struct xfrm_state *x) 356static void xfrm_state_gc_destroy(struct xfrm_state *x)
354{ 357{
355 del_timer_sync(&x->timer); 358 tasklet_hrtimer_cancel(&x->mtimer);
356 del_timer_sync(&x->rtimer); 359 del_timer_sync(&x->rtimer);
357 kfree(x->aalg); 360 kfree(x->aalg);
358 kfree(x->ealg); 361 kfree(x->ealg);
@@ -398,9 +401,10 @@ static inline unsigned long make_jiffies(long secs)
398 return secs*HZ; 401 return secs*HZ;
399} 402}
400 403
401static void xfrm_timer_handler(unsigned long data) 404static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
402{ 405{
403 struct xfrm_state *x = (struct xfrm_state*)data; 406 struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
407 struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
404 struct net *net = xs_net(x); 408 struct net *net = xs_net(x);
405 unsigned long now = get_seconds(); 409 unsigned long now = get_seconds();
406 long next = LONG_MAX; 410 long next = LONG_MAX;
@@ -451,8 +455,9 @@ static void xfrm_timer_handler(unsigned long data)
451 if (warn) 455 if (warn)
452 km_state_expired(x, 0, 0); 456 km_state_expired(x, 0, 0);
453resched: 457resched:
454 if (next != LONG_MAX) 458 if (next != LONG_MAX){
455 mod_timer(&x->timer, jiffies + make_jiffies(next)); 459 tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
460 }
456 461
457 goto out; 462 goto out;
458 463
@@ -474,6 +479,7 @@ expired:
474 479
475out: 480out:
476 spin_unlock(&x->lock); 481 spin_unlock(&x->lock);
482 return HRTIMER_NORESTART;
477} 483}
478 484
479static void xfrm_replay_timer_handler(unsigned long data); 485static void xfrm_replay_timer_handler(unsigned long data);
@@ -492,7 +498,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
492 INIT_HLIST_NODE(&x->bydst); 498 INIT_HLIST_NODE(&x->bydst);
493 INIT_HLIST_NODE(&x->bysrc); 499 INIT_HLIST_NODE(&x->bysrc);
494 INIT_HLIST_NODE(&x->byspi); 500 INIT_HLIST_NODE(&x->byspi);
495 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x); 501 tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
496 setup_timer(&x->rtimer, xfrm_replay_timer_handler, 502 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
497 (unsigned long)x); 503 (unsigned long)x);
498 x->curlft.add_time = get_seconds(); 504 x->curlft.add_time = get_seconds();
@@ -843,8 +849,7 @@ found:
843 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); 849 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
844 } 850 }
845 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 851 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
846 x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ; 852 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
847 add_timer(&x->timer);
848 net->xfrm.state_num++; 853 net->xfrm.state_num++;
849 xfrm_hash_grow_check(net, x->bydst.next != NULL); 854 xfrm_hash_grow_check(net, x->bydst.next != NULL);
850 } else { 855 } else {
@@ -921,7 +926,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
921 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); 926 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
922 } 927 }
923 928
924 mod_timer(&x->timer, jiffies + HZ); 929 tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
925 if (x->replay_maxage) 930 if (x->replay_maxage)
926 mod_timer(&x->rtimer, jiffies + x->replay_maxage); 931 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
927 932
@@ -1019,8 +1024,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1019 x->props.reqid = reqid; 1024 x->props.reqid = reqid;
1020 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1025 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1021 xfrm_state_hold(x); 1026 xfrm_state_hold(x);
1022 x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ; 1027 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1023 add_timer(&x->timer);
1024 list_add(&x->km.all, &net->xfrm.state_all); 1028 list_add(&x->km.all, &net->xfrm.state_all);
1025 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); 1029 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
1026 h = xfrm_src_hash(net, daddr, saddr, family); 1030 h = xfrm_src_hash(net, daddr, saddr, family);
@@ -1110,7 +1114,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1110 x->props.saddr = orig->props.saddr; 1114 x->props.saddr = orig->props.saddr;
1111 1115
1112 if (orig->aalg) { 1116 if (orig->aalg) {
1113 x->aalg = xfrm_algo_clone(orig->aalg); 1117 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1114 if (!x->aalg) 1118 if (!x->aalg)
1115 goto error; 1119 goto error;
1116 } 1120 }
@@ -1300,7 +1304,7 @@ out:
1300 memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); 1304 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1301 x1->km.dying = 0; 1305 x1->km.dying = 0;
1302 1306
1303 mod_timer(&x1->timer, jiffies + HZ); 1307 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1304 if (x1->curlft.use_time) 1308 if (x1->curlft.use_time)
1305 xfrm_state_check_expire(x1); 1309 xfrm_state_check_expire(x1);
1306 1310
@@ -1325,7 +1329,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1325 if (x->curlft.bytes >= x->lft.hard_byte_limit || 1329 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1326 x->curlft.packets >= x->lft.hard_packet_limit) { 1330 x->curlft.packets >= x->lft.hard_packet_limit) {
1327 x->km.state = XFRM_STATE_EXPIRED; 1331 x->km.state = XFRM_STATE_EXPIRED;
1328 mod_timer(&x->timer, jiffies); 1332 tasklet_hrtimer_start(&x->mtimer, ktime_set(0,0), HRTIMER_MODE_REL);
1329 return -EINVAL; 1333 return -EINVAL;
1330 } 1334 }
1331 1335
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b95a2d64eb59..1ada6186933c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -62,6 +62,22 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
62 return 0; 62 return 0;
63} 63}
64 64
65static int verify_auth_trunc(struct nlattr **attrs)
66{
67 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
68 struct xfrm_algo_auth *algp;
69
70 if (!rt)
71 return 0;
72
73 algp = nla_data(rt);
74 if (nla_len(rt) < xfrm_alg_auth_len(algp))
75 return -EINVAL;
76
77 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
78 return 0;
79}
80
65static int verify_aead(struct nlattr **attrs) 81static int verify_aead(struct nlattr **attrs)
66{ 82{
67 struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; 83 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
@@ -128,7 +144,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
128 err = -EINVAL; 144 err = -EINVAL;
129 switch (p->id.proto) { 145 switch (p->id.proto) {
130 case IPPROTO_AH: 146 case IPPROTO_AH:
131 if (!attrs[XFRMA_ALG_AUTH] || 147 if ((!attrs[XFRMA_ALG_AUTH] &&
148 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
132 attrs[XFRMA_ALG_AEAD] || 149 attrs[XFRMA_ALG_AEAD] ||
133 attrs[XFRMA_ALG_CRYPT] || 150 attrs[XFRMA_ALG_CRYPT] ||
134 attrs[XFRMA_ALG_COMP]) 151 attrs[XFRMA_ALG_COMP])
@@ -139,10 +156,12 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
139 if (attrs[XFRMA_ALG_COMP]) 156 if (attrs[XFRMA_ALG_COMP])
140 goto out; 157 goto out;
141 if (!attrs[XFRMA_ALG_AUTH] && 158 if (!attrs[XFRMA_ALG_AUTH] &&
159 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
142 !attrs[XFRMA_ALG_CRYPT] && 160 !attrs[XFRMA_ALG_CRYPT] &&
143 !attrs[XFRMA_ALG_AEAD]) 161 !attrs[XFRMA_ALG_AEAD])
144 goto out; 162 goto out;
145 if ((attrs[XFRMA_ALG_AUTH] || 163 if ((attrs[XFRMA_ALG_AUTH] ||
164 attrs[XFRMA_ALG_AUTH_TRUNC] ||
146 attrs[XFRMA_ALG_CRYPT]) && 165 attrs[XFRMA_ALG_CRYPT]) &&
147 attrs[XFRMA_ALG_AEAD]) 166 attrs[XFRMA_ALG_AEAD])
148 goto out; 167 goto out;
@@ -152,6 +171,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
152 if (!attrs[XFRMA_ALG_COMP] || 171 if (!attrs[XFRMA_ALG_COMP] ||
153 attrs[XFRMA_ALG_AEAD] || 172 attrs[XFRMA_ALG_AEAD] ||
154 attrs[XFRMA_ALG_AUTH] || 173 attrs[XFRMA_ALG_AUTH] ||
174 attrs[XFRMA_ALG_AUTH_TRUNC] ||
155 attrs[XFRMA_ALG_CRYPT]) 175 attrs[XFRMA_ALG_CRYPT])
156 goto out; 176 goto out;
157 break; 177 break;
@@ -161,6 +181,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
161 case IPPROTO_ROUTING: 181 case IPPROTO_ROUTING:
162 if (attrs[XFRMA_ALG_COMP] || 182 if (attrs[XFRMA_ALG_COMP] ||
163 attrs[XFRMA_ALG_AUTH] || 183 attrs[XFRMA_ALG_AUTH] ||
184 attrs[XFRMA_ALG_AUTH_TRUNC] ||
164 attrs[XFRMA_ALG_AEAD] || 185 attrs[XFRMA_ALG_AEAD] ||
165 attrs[XFRMA_ALG_CRYPT] || 186 attrs[XFRMA_ALG_CRYPT] ||
166 attrs[XFRMA_ENCAP] || 187 attrs[XFRMA_ENCAP] ||
@@ -176,6 +197,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
176 197
177 if ((err = verify_aead(attrs))) 198 if ((err = verify_aead(attrs)))
178 goto out; 199 goto out;
200 if ((err = verify_auth_trunc(attrs)))
201 goto out;
179 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH))) 202 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
180 goto out; 203 goto out;
181 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT))) 204 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
@@ -229,6 +252,66 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
229 return 0; 252 return 0;
230} 253}
231 254
255static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
256 struct nlattr *rta)
257{
258 struct xfrm_algo *ualg;
259 struct xfrm_algo_auth *p;
260 struct xfrm_algo_desc *algo;
261
262 if (!rta)
263 return 0;
264
265 ualg = nla_data(rta);
266
267 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
268 if (!algo)
269 return -ENOSYS;
270 *props = algo->desc.sadb_alg_id;
271
272 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
273 if (!p)
274 return -ENOMEM;
275
276 strcpy(p->alg_name, algo->name);
277 p->alg_key_len = ualg->alg_key_len;
278 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
279 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
280
281 *algpp = p;
282 return 0;
283}
284
285static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
286 struct nlattr *rta)
287{
288 struct xfrm_algo_auth *p, *ualg;
289 struct xfrm_algo_desc *algo;
290
291 if (!rta)
292 return 0;
293
294 ualg = nla_data(rta);
295
296 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
297 if (!algo)
298 return -ENOSYS;
299 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
300 return -EINVAL;
301 *props = algo->desc.sadb_alg_id;
302
303 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
304 if (!p)
305 return -ENOMEM;
306
307 strcpy(p->alg_name, algo->name);
308 if (!p->alg_trunc_len)
309 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
310
311 *algpp = p;
312 return 0;
313}
314
232static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, 315static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
233 struct nlattr *rta) 316 struct nlattr *rta)
234{ 317{
@@ -332,10 +415,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
332 if ((err = attach_aead(&x->aead, &x->props.ealgo, 415 if ((err = attach_aead(&x->aead, &x->props.ealgo,
333 attrs[XFRMA_ALG_AEAD]))) 416 attrs[XFRMA_ALG_AEAD])))
334 goto error; 417 goto error;
335 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo, 418 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
336 xfrm_aalg_get_byname, 419 attrs[XFRMA_ALG_AUTH_TRUNC])))
337 attrs[XFRMA_ALG_AUTH])))
338 goto error; 420 goto error;
421 if (!x->props.aalgo) {
422 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
423 attrs[XFRMA_ALG_AUTH])))
424 goto error;
425 }
339 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, 426 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
340 xfrm_ealg_get_byname, 427 xfrm_ealg_get_byname,
341 attrs[XFRMA_ALG_CRYPT]))) 428 attrs[XFRMA_ALG_CRYPT])))
@@ -548,6 +635,24 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
548 return 0; 635 return 0;
549} 636}
550 637
638static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
639{
640 struct xfrm_algo *algo;
641 struct nlattr *nla;
642
643 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
644 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
645 if (!nla)
646 return -EMSGSIZE;
647
648 algo = nla_data(nla);
649 strcpy(algo->alg_name, auth->alg_name);
650 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
651 algo->alg_key_len = auth->alg_key_len;
652
653 return 0;
654}
655
551/* Don't change this without updating xfrm_sa_len! */ 656/* Don't change this without updating xfrm_sa_len! */
552static int copy_to_user_state_extra(struct xfrm_state *x, 657static int copy_to_user_state_extra(struct xfrm_state *x,
553 struct xfrm_usersa_info *p, 658 struct xfrm_usersa_info *p,
@@ -563,8 +668,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
563 668
564 if (x->aead) 669 if (x->aead)
565 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); 670 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
566 if (x->aalg) 671 if (x->aalg) {
567 NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg); 672 if (copy_to_user_auth(x->aalg, skb))
673 goto nla_put_failure;
674
675 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
676 xfrm_alg_auth_len(x->aalg), x->aalg);
677 }
568 if (x->ealg) 678 if (x->ealg)
569 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); 679 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
570 if (x->calg) 680 if (x->calg)
@@ -2117,8 +2227,11 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
2117 size_t l = 0; 2227 size_t l = 0;
2118 if (x->aead) 2228 if (x->aead)
2119 l += nla_total_size(aead_len(x->aead)); 2229 l += nla_total_size(aead_len(x->aead));
2120 if (x->aalg) 2230 if (x->aalg) {
2121 l += nla_total_size(xfrm_alg_len(x->aalg)); 2231 l += nla_total_size(sizeof(struct xfrm_algo) +
2232 (x->aalg->alg_key_len + 7) / 8);
2233 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2234 }
2122 if (x->ealg) 2235 if (x->ealg)
2123 l += nla_total_size(xfrm_alg_len(x->ealg)); 2236 l += nla_total_size(xfrm_alg_len(x->ealg));
2124 if (x->calg) 2237 if (x->calg)
@@ -2608,22 +2721,24 @@ static int __net_init xfrm_user_net_init(struct net *net)
2608 xfrm_netlink_rcv, NULL, THIS_MODULE); 2721 xfrm_netlink_rcv, NULL, THIS_MODULE);
2609 if (nlsk == NULL) 2722 if (nlsk == NULL)
2610 return -ENOMEM; 2723 return -ENOMEM;
2724 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2611 rcu_assign_pointer(net->xfrm.nlsk, nlsk); 2725 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2612 return 0; 2726 return 0;
2613} 2727}
2614 2728
2615static void __net_exit xfrm_user_net_exit(struct net *net) 2729static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2616{ 2730{
2617 struct sock *nlsk = net->xfrm.nlsk; 2731 struct net *net;
2618 2732 list_for_each_entry(net, net_exit_list, exit_list)
2619 rcu_assign_pointer(net->xfrm.nlsk, NULL); 2733 rcu_assign_pointer(net->xfrm.nlsk, NULL);
2620 synchronize_rcu(); 2734 synchronize_net();
2621 netlink_kernel_release(nlsk); 2735 list_for_each_entry(net, net_exit_list, exit_list)
2736 netlink_kernel_release(net->xfrm.nlsk_stash);
2622} 2737}
2623 2738
2624static struct pernet_operations xfrm_user_net_ops = { 2739static struct pernet_operations xfrm_user_net_ops = {
2625 .init = xfrm_user_net_init, 2740 .init = xfrm_user_net_init,
2626 .exit = xfrm_user_net_exit, 2741 .exit_batch = xfrm_user_net_exit,
2627}; 2742};
2628 2743
2629static int __init xfrm_user_init(void) 2744static int __init xfrm_user_init(void)