aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c8
-rw-r--r--net/8021q/vlan_dev.c107
-rw-r--r--net/8021q/vlan_netlink.c20
-rw-r--r--net/9p/client.c13
-rw-r--r--net/appletalk/ddp.c27
-rw-r--r--net/atm/br2684.c40
-rw-r--r--net/atm/clip.c5
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/mpc.c6
-rw-r--r--net/bluetooth/Kconfig1
-rw-r--r--net/bluetooth/bnep/netdev.c3
-rw-r--r--net/bluetooth/hci_conn.c17
-rw-r--r--net/bluetooth/hci_core.c2
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bluetooth/hidp/core.c66
-rw-r--r--net/bluetooth/hidp/hidp.h2
-rw-r--r--net/bluetooth/l2cap.c1357
-rw-r--r--net/bluetooth/rfcomm/core.c70
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c49
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_sysfs_if.c17
-rw-r--r--net/can/af_can.c20
-rw-r--r--net/can/bcm.c85
-rw-r--r--net/can/proc.c281
-rw-r--r--net/can/raw.c1
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/core/dev.c616
-rw-r--r--net/core/drop_monitor.c14
-rw-r--r--net/core/ethtool.c16
-rw-r--r--net/core/gen_estimator.c12
-rw-r--r--net/core/gen_stats.c11
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/netpoll.c5
-rw-r--r--net/core/pktgen.c700
-rw-r--r--net/core/rtnetlink.c16
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dcb/dcbnl.c130
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c8
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/decnet/dn_neigh.c6
-rw-r--r--net/dsa/dsa_priv.h6
-rw-r--r--net/dsa/tag_dsa.c2
-rw-r--r--net/dsa/tag_edsa.c2
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/econet/af_econet.c1
-rw-r--r--net/ieee802154/Makefile2
-rw-r--r--net/ieee802154/af_ieee802154.c6
-rw-r--r--net/ieee802154/dgram.c82
-rw-r--r--net/ieee802154/netlink.c166
-rw-r--r--net/ieee802154/nl_policy.c4
-rw-r--r--net/ieee802154/raw.c17
-rw-r--r--net/ieee802154/wpan-class.c159
-rw-r--r--net/ipv4/af_inet.c112
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/fib_trie.c95
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/protocol.c19
-rw-r--r--net/ipv4/raw.c9
-rw-r--r--net/ipv4/route.c20
-rw-r--r--net/ipv4/tcp.c16
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_ipv4.c66
-rw-r--r--net/ipv4/tcp_minisocks.c25
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/tcp_timer.c16
-rw-r--r--net/ipv4/udp.c12
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/icmp.c17
-rw-r--r--net/ipv6/ip6_output.c18
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6mr.c9
-rw-r--r--net/ipv6/ipv6_sockglue.c5
-rw-r--r--net/ipv6/ndisc.c24
-rw-r--r--net/ipv6/protocol.c15
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/route.c34
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/tcp_ipv6.c21
-rw-r--r--net/ipv6/udp.c17
-rw-r--r--net/ipx/af_ipx.c1
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/irlan/irlan_eth.c6
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/irda/irnet/irnet_ppp.c1
-rw-r--r--net/irda/irnet/irnet_ppp.h2
-rw-r--r--net/irda/irproc.c14
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/llc/af_llc.c1
-rw-r--r--net/mac80211/Kconfig13
-rw-r--r--net/mac80211/agg-tx.c8
-rw-r--r--net/mac80211/cfg.c4
-rw-r--r--net/mac80211/debugfs_netdev.c6
-rw-r--r--net/mac80211/driver-ops.h38
-rw-r--r--net/mac80211/driver-trace.h36
-rw-r--r--net/mac80211/ieee80211_i.h29
-rw-r--r--net/mac80211/iface.c56
-rw-r--r--net/mac80211/main.c48
-rw-r--r--net/mac80211/mesh.c194
-rw-r--r--net/mac80211/mesh.h32
-rw-r--r--net/mac80211/mesh_hwmp.c38
-rw-r--r--net/mac80211/mesh_pathtbl.c151
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c59
-rw-r--r--net/mac80211/pm.c13
-rw-r--r--net/mac80211/rc80211_minstrel.h1
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c4
-rw-r--r--net/mac80211/rc80211_pid_algo.c16
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c2
-rw-r--r--net/mac80211/rx.c90
-rw-r--r--net/mac80211/scan.c35
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/tx.c166
-rw-r--r--net/mac80211/util.c12
-rw-r--r--net/netfilter/xt_RATEEST.c2
-rw-r--r--net/netfilter/xt_quota.c2
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/nr_route.c21
-rw-r--r--net/phonet/pep-gprs.c2
-rw-r--r--net/phonet/socket.c7
-rw-r--r--net/rds/Kconfig26
-rw-r--r--net/rds/Makefile11
-rw-r--r--net/rds/af_rds.c8
-rw-r--r--net/rds/bind.c3
-rw-r--r--net/rds/cong.c1
-rw-r--r--net/rds/connection.c5
-rw-r--r--net/rds/ib.c1
-rw-r--r--net/rds/info.c3
-rw-r--r--net/rds/iw.c1
-rw-r--r--net/rds/message.c6
-rw-r--r--net/rds/page.c1
-rw-r--r--net/rds/rdma_transport.c6
-rw-r--r--net/rds/rds.h6
-rw-r--r--net/rds/recv.c4
-rw-r--r--net/rds/send.c3
-rw-r--r--net/rds/stats.c2
-rw-r--r--net/rds/tcp.c320
-rw-r--r--net/rds/tcp.h93
-rw-r--r--net/rds/tcp_connect.c153
-rw-r--r--net/rds/tcp_listen.c199
-rw-r--r--net/rds/tcp_recv.c356
-rw-r--r--net/rds/tcp_send.c263
-rw-r--r--net/rds/tcp_stats.c74
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rds/transport.c31
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/rose/rose_dev.c2
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/cls_api.c12
-rw-r--r--net/sched/sch_api.c139
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sched/sch_cbq.c40
-rw-r--r--net/sched/sch_drr.c2
-rw-r--r--net/sched/sch_generic.c130
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c37
-rw-r--r--net/sched/sch_ingress.c14
-rw-r--r--net/sched/sch_mq.c235
-rw-r--r--net/sched/sch_multiq.c33
-rw-r--r--net/sched/sch_prio.c32
-rw-r--r--net/sched/sch_red.c21
-rw-r--r--net/sched/sch_sfq.c7
-rw-r--r--net/sched/sch_tbf.c22
-rw-r--r--net/sched/sch_teql.c4
-rw-r--r--net/sctp/associola.c93
-rw-r--r--net/sctp/bind_addr.c21
-rw-r--r--net/sctp/chunk.c62
-rw-r--r--net/sctp/output.c188
-rw-r--r--net/sctp/outqueue.c47
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c12
-rw-r--r--net/sctp/sm_make_chunk.c25
-rw-r--r--net/sctp/sm_sideeffect.c56
-rw-r--r--net/sctp/sm_statefuns.c68
-rw-r--r--net/sctp/socket.c38
-rw-r--r--net/sctp/sysctl.c12
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/sched.c1
-rw-r--r--net/sunrpc/svc_xprt.c1
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/wanrouter/wanmain.c1
-rw-r--r--net/wireless/Kconfig32
-rw-r--r--net/wireless/Makefile3
-rw-r--r--net/wireless/chan.c89
-rw-r--r--net/wireless/core.c155
-rw-r--r--net/wireless/core.h31
-rw-r--r--net/wireless/ibss.c65
-rw-r--r--net/wireless/mlme.c26
-rw-r--r--net/wireless/nl80211.c106
-rw-r--r--net/wireless/reg.c14
-rw-r--r--net/wireless/reg.h3
-rw-r--r--net/wireless/scan.c91
-rw-r--r--net/wireless/sme.c143
-rw-r--r--net/wireless/util.c124
-rw-r--r--net/wireless/wext-compat.c70
-rw-r--r--net/wireless/wext-compat.h3
-rw-r--r--net/wireless/wext-sme.c83
-rw-r--r--net/x25/af_x25.c1
-rw-r--r--net/xfrm/xfrm_hash.h2
-rw-r--r--net/xfrm/xfrm_proc.c2
211 files changed, 7242 insertions, 2697 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a1f16303703a..8836575f9d79 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -330,12 +330,13 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
330 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); 330 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
331 } 331 }
332 332
333 new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, 333 new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
334 vlan_setup); 334 vlan_setup, real_dev->num_tx_queues);
335 335
336 if (new_dev == NULL) 336 if (new_dev == NULL)
337 return -ENOBUFS; 337 return -ENOBUFS;
338 338
339 new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
339 dev_net_set(new_dev, net); 340 dev_net_set(new_dev, net);
340 /* need 4 bytes for extra VLAN header info, 341 /* need 4 bytes for extra VLAN header info,
341 * hope the underlying device can handle it. 342 * hope the underlying device can handle it.
@@ -391,6 +392,9 @@ static void vlan_transfer_features(struct net_device *dev,
391 vlandev->features &= ~dev->vlan_features; 392 vlandev->features &= ~dev->vlan_features;
392 vlandev->features |= dev->features & dev->vlan_features; 393 vlandev->features |= dev->features & dev->vlan_features;
393 vlandev->gso_max_size = dev->gso_max_size; 394 vlandev->gso_max_size = dev->gso_max_size;
395#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
396 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
397#endif
394 398
395 if (old_features != vlandev->features) 399 if (old_features != vlandev->features)
396 netdev_features_change(vlandev); 400 netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 96bad8f233e2..4198ec5c8abc 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -288,10 +288,14 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
288 return rc; 288 return rc;
289} 289}
290 290
291static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 291static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
292 struct net_device *dev)
292{ 293{
293 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 294 int i = skb_get_queue_mapping(skb);
295 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
294 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 296 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
297 unsigned int len;
298 int ret;
295 299
296 /* Handle non-VLAN frames if they are sent to us, for example by DHCP. 300 /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
297 * 301 *
@@ -317,29 +321,43 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
317 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++; 321 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
318 } 322 }
319 323
320 txq->tx_packets++;
321 txq->tx_bytes += skb->len;
322 324
323 skb->dev = vlan_dev_info(dev)->real_dev; 325 skb->dev = vlan_dev_info(dev)->real_dev;
324 dev_queue_xmit(skb); 326 len = skb->len;
327 ret = dev_queue_xmit(skb);
328
329 if (likely(ret == NET_XMIT_SUCCESS)) {
330 txq->tx_packets++;
331 txq->tx_bytes += len;
332 } else
333 txq->tx_dropped++;
334
325 return NETDEV_TX_OK; 335 return NETDEV_TX_OK;
326} 336}
327 337
328static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, 338static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
329 struct net_device *dev) 339 struct net_device *dev)
330{ 340{
331 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 341 int i = skb_get_queue_mapping(skb);
342 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
332 u16 vlan_tci; 343 u16 vlan_tci;
344 unsigned int len;
345 int ret;
333 346
334 vlan_tci = vlan_dev_info(dev)->vlan_id; 347 vlan_tci = vlan_dev_info(dev)->vlan_id;
335 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 348 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
336 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 349 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
337 350
338 txq->tx_packets++;
339 txq->tx_bytes += skb->len;
340
341 skb->dev = vlan_dev_info(dev)->real_dev; 351 skb->dev = vlan_dev_info(dev)->real_dev;
342 dev_queue_xmit(skb); 352 len = skb->len;
353 ret = dev_queue_xmit(skb);
354
355 if (likely(ret == NET_XMIT_SUCCESS)) {
356 txq->tx_packets++;
357 txq->tx_bytes += len;
358 } else
359 txq->tx_dropped++;
360
343 return NETDEV_TX_OK; 361 return NETDEV_TX_OK;
344} 362}
345 363
@@ -561,6 +579,55 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
561 return err; 579 return err;
562} 580}
563 581
582#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
583static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
584 struct scatterlist *sgl, unsigned int sgc)
585{
586 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
587 const struct net_device_ops *ops = real_dev->netdev_ops;
588 int rc = 0;
589
590 if (ops->ndo_fcoe_ddp_setup)
591 rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
592
593 return rc;
594}
595
596static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
597{
598 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
599 const struct net_device_ops *ops = real_dev->netdev_ops;
600 int len = 0;
601
602 if (ops->ndo_fcoe_ddp_done)
603 len = ops->ndo_fcoe_ddp_done(real_dev, xid);
604
605 return len;
606}
607
608static int vlan_dev_fcoe_enable(struct net_device *dev)
609{
610 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
611 const struct net_device_ops *ops = real_dev->netdev_ops;
612 int rc = -EINVAL;
613
614 if (ops->ndo_fcoe_enable)
615 rc = ops->ndo_fcoe_enable(real_dev);
616 return rc;
617}
618
619static int vlan_dev_fcoe_disable(struct net_device *dev)
620{
621 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
622 const struct net_device_ops *ops = real_dev->netdev_ops;
623 int rc = -EINVAL;
624
625 if (ops->ndo_fcoe_disable)
626 rc = ops->ndo_fcoe_disable(real_dev);
627 return rc;
628}
629#endif
630
564static void vlan_dev_change_rx_flags(struct net_device *dev, int change) 631static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
565{ 632{
566 struct net_device *real_dev = vlan_dev_info(dev)->real_dev; 633 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -635,6 +702,10 @@ static int vlan_dev_init(struct net_device *dev)
635 if (is_zero_ether_addr(dev->broadcast)) 702 if (is_zero_ether_addr(dev->broadcast))
636 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 703 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
637 704
705#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
706 dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
707#endif
708
638 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 709 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
639 dev->header_ops = real_dev->header_ops; 710 dev->header_ops = real_dev->header_ops;
640 dev->hard_header_len = real_dev->hard_header_len; 711 dev->hard_header_len = real_dev->hard_header_len;
@@ -715,6 +786,12 @@ static const struct net_device_ops vlan_netdev_ops = {
715 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 786 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
716 .ndo_do_ioctl = vlan_dev_ioctl, 787 .ndo_do_ioctl = vlan_dev_ioctl,
717 .ndo_neigh_setup = vlan_dev_neigh_setup, 788 .ndo_neigh_setup = vlan_dev_neigh_setup,
789#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
790 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
791 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
792 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
793 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
794#endif
718}; 795};
719 796
720static const struct net_device_ops vlan_netdev_accel_ops = { 797static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -731,6 +808,12 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
731 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 808 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
732 .ndo_do_ioctl = vlan_dev_ioctl, 809 .ndo_do_ioctl = vlan_dev_ioctl,
733 .ndo_neigh_setup = vlan_dev_neigh_setup, 810 .ndo_neigh_setup = vlan_dev_neigh_setup,
811#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
812 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
813 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
814 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
815 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
816#endif
734}; 817};
735 818
736void vlan_setup(struct net_device *dev) 819void vlan_setup(struct net_device *dev)
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index e9c91dcecc9b..343146e1bceb 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -100,6 +100,25 @@ static int vlan_changelink(struct net_device *dev,
100 return 0; 100 return 0;
101} 101}
102 102
103static int vlan_get_tx_queues(struct net *net,
104 struct nlattr *tb[],
105 unsigned int *num_tx_queues,
106 unsigned int *real_num_tx_queues)
107{
108 struct net_device *real_dev;
109
110 if (!tb[IFLA_LINK])
111 return -EINVAL;
112
113 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
114 if (!real_dev)
115 return -ENODEV;
116
117 *num_tx_queues = real_dev->num_tx_queues;
118 *real_num_tx_queues = real_dev->real_num_tx_queues;
119 return 0;
120}
121
103static int vlan_newlink(struct net_device *dev, 122static int vlan_newlink(struct net_device *dev,
104 struct nlattr *tb[], struct nlattr *data[]) 123 struct nlattr *tb[], struct nlattr *data[])
105{ 124{
@@ -216,6 +235,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
216 .maxtype = IFLA_VLAN_MAX, 235 .maxtype = IFLA_VLAN_MAX,
217 .policy = vlan_policy, 236 .policy = vlan_policy,
218 .priv_size = sizeof(struct vlan_dev_info), 237 .priv_size = sizeof(struct vlan_dev_info),
238 .get_tx_queues = vlan_get_tx_queues,
219 .setup = vlan_setup, 239 .setup = vlan_setup,
220 .validate = vlan_validate, 240 .validate = vlan_validate,
221 .newlink = vlan_newlink, 241 .newlink = vlan_newlink,
diff --git a/net/9p/client.c b/net/9p/client.c
index dd43a8289b0d..787ccddb85ea 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -117,9 +117,6 @@ static int parse_opts(char *opts, struct p9_client *clnt)
117 } 117 }
118 } 118 }
119 119
120 if (!clnt->trans_mod)
121 clnt->trans_mod = v9fs_get_default_trans();
122
123 kfree(options); 120 kfree(options);
124 return ret; 121 return ret;
125} 122}
@@ -689,6 +686,9 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
689 if (err < 0) 686 if (err < 0)
690 goto error; 687 goto error;
691 688
689 if (!clnt->trans_mod)
690 clnt->trans_mod = v9fs_get_default_trans();
691
692 if (clnt->trans_mod == NULL) { 692 if (clnt->trans_mod == NULL) {
693 err = -EPROTONOSUPPORT; 693 err = -EPROTONOSUPPORT;
694 P9_DPRINTK(P9_DEBUG_ERROR, 694 P9_DPRINTK(P9_DEBUG_ERROR,
@@ -1098,7 +1098,6 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1098 1098
1099 if (data) { 1099 if (data) {
1100 memmove(data, dataptr, count); 1100 memmove(data, dataptr, count);
1101 data += count;
1102 } 1101 }
1103 1102
1104 if (udata) { 1103 if (udata) {
@@ -1192,9 +1191,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1192 1191
1193 err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret); 1192 err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret);
1194 if (err) { 1193 if (err) {
1195 ret = ERR_PTR(err);
1196 p9pdu_dump(1, req->rc); 1194 p9pdu_dump(1, req->rc);
1197 goto free_and_error; 1195 p9_free_req(clnt, req);
1196 goto error;
1198 } 1197 }
1199 1198
1200 P9_DPRINTK(P9_DEBUG_9P, 1199 P9_DPRINTK(P9_DEBUG_9P,
@@ -1211,8 +1210,6 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1211 p9_free_req(clnt, req); 1210 p9_free_req(clnt, req);
1212 return ret; 1211 return ret;
1213 1212
1214free_and_error:
1215 p9_free_req(clnt, req);
1216error: 1213error:
1217 kfree(ret); 1214 kfree(ret);
1218 return ERR_PTR(err); 1215 return ERR_PTR(err);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 590b83963622..0d42d5da50ad 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -54,6 +54,7 @@
54#include <linux/capability.h> 54#include <linux/capability.h>
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/if_arp.h> 56#include <linux/if_arp.h>
57#include <linux/smp_lock.h>
57#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 58#include <linux/termios.h> /* For TIOCOUTQ/INQ */
58#include <net/datalink.h> 59#include <net/datalink.h>
59#include <net/psnap.h> 60#include <net/psnap.h>
@@ -1237,6 +1238,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1237 return -ENOBUFS; 1238 return -ENOBUFS;
1238 1239
1239 *uaddr_len = sizeof(struct sockaddr_at); 1240 *uaddr_len = sizeof(struct sockaddr_at);
1241 memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
1240 1242
1241 if (peer) { 1243 if (peer) {
1242 if (sk->sk_state != TCP_ESTABLISHED) 1244 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1398,7 +1400,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1398 __u16 len_hops; 1400 __u16 len_hops;
1399 1401
1400 if (!net_eq(dev_net(dev), &init_net)) 1402 if (!net_eq(dev_net(dev), &init_net))
1401 goto freeit; 1403 goto drop;
1402 1404
1403 /* Don't mangle buffer if shared */ 1405 /* Don't mangle buffer if shared */
1404 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 1406 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
@@ -1406,7 +1408,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1406 1408
1407 /* Size check and make sure header is contiguous */ 1409 /* Size check and make sure header is contiguous */
1408 if (!pskb_may_pull(skb, sizeof(*ddp))) 1410 if (!pskb_may_pull(skb, sizeof(*ddp)))
1409 goto freeit; 1411 goto drop;
1410 1412
1411 ddp = ddp_hdr(skb); 1413 ddp = ddp_hdr(skb);
1412 1414
@@ -1424,7 +1426,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1424 if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) { 1426 if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
1425 pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, " 1427 pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
1426 "skb->len=%u)\n", len_hops & 1023, skb->len); 1428 "skb->len=%u)\n", len_hops & 1023, skb->len);
1427 goto freeit; 1429 goto drop;
1428 } 1430 }
1429 1431
1430 /* 1432 /*
@@ -1434,7 +1436,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1434 if (ddp->deh_sum && 1436 if (ddp->deh_sum &&
1435 atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum) 1437 atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
1436 /* Not a valid AppleTalk frame - dustbin time */ 1438 /* Not a valid AppleTalk frame - dustbin time */
1437 goto freeit; 1439 goto drop;
1438 1440
1439 /* Check the packet is aimed at us */ 1441 /* Check the packet is aimed at us */
1440 if (!ddp->deh_dnet) /* Net 0 is 'this network' */ 1442 if (!ddp->deh_dnet) /* Net 0 is 'this network' */
@@ -1447,7 +1449,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1447 * AppleTalk iface 1449 * AppleTalk iface
1448 */ 1450 */
1449 atalk_route_packet(skb, dev, ddp, len_hops, origlen); 1451 atalk_route_packet(skb, dev, ddp, len_hops, origlen);
1450 goto out; 1452 return NET_RX_SUCCESS;
1451 } 1453 }
1452 1454
1453 /* if IP over DDP is not selected this code will be optimized out */ 1455 /* if IP over DDP is not selected this code will be optimized out */
@@ -1463,18 +1465,21 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1463 1465
1464 sock = atalk_search_socket(&tosat, atif); 1466 sock = atalk_search_socket(&tosat, atif);
1465 if (!sock) /* But not one of our sockets */ 1467 if (!sock) /* But not one of our sockets */
1466 goto freeit; 1468 goto drop;
1467 1469
1468 /* Queue packet (standard) */ 1470 /* Queue packet (standard) */
1469 skb->sk = sock; 1471 skb->sk = sock;
1470 1472
1471 if (sock_queue_rcv_skb(sock, skb) < 0) 1473 if (sock_queue_rcv_skb(sock, skb) < 0)
1472 goto freeit; 1474 goto drop;
1473out: 1475
1474 return 0; 1476 return NET_RX_SUCCESS;
1475freeit: 1477
1478drop:
1476 kfree_skb(skb); 1479 kfree_skb(skb);
1477 goto out; 1480out:
1481 return NET_RX_DROP;
1482
1478} 1483}
1479 1484
1480/* 1485/*
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 848af113ba2a..26a646d4eb32 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -69,7 +69,7 @@ struct br2684_vcc {
69 struct net_device *device; 69 struct net_device *device;
70 /* keep old push, pop functions for chaining */ 70 /* keep old push, pop functions for chaining */
71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb); 71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb);
72 /* void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); */ 72 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
73 enum br2684_encaps encaps; 73 enum br2684_encaps encaps;
74 struct list_head brvccs; 74 struct list_head brvccs;
75#ifdef CONFIG_ATM_BR2684_IPFILTER 75#ifdef CONFIG_ATM_BR2684_IPFILTER
@@ -142,6 +142,22 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
142 return NULL; 142 return NULL;
143} 143}
144 144
145/* chained vcc->pop function. Check if we should wake the netif_queue */
146static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
147{
148 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
149 struct net_device *net_dev = skb->dev;
150
151 pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev);
152 brvcc->old_pop(vcc, skb);
153
154 if (!net_dev)
155 return;
156
157 if (atm_may_send(vcc, 0))
158 netif_wake_queue(net_dev);
159
160}
145/* 161/*
146 * Send a packet out a particular vcc. Not to useful right now, but paves 162 * Send a packet out a particular vcc. Not to useful right now, but paves
147 * the way for multiple vcc's per itf. Returns true if we can send, 163 * the way for multiple vcc's per itf. Returns true if we can send,
@@ -200,20 +216,19 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
200 216
201 ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; 217 ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
202 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); 218 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
203 if (!atm_may_send(atmvcc, skb->truesize)) {
204 /*
205 * We free this here for now, because we cannot know in a higher
206 * layer whether the skb pointer it supplied wasn't freed yet.
207 * Now, it always is.
208 */
209 dev_kfree_skb(skb);
210 return 0;
211 }
212 atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); 219 atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
213 ATM_SKB(skb)->atm_options = atmvcc->atm_options; 220 ATM_SKB(skb)->atm_options = atmvcc->atm_options;
214 dev->stats.tx_packets++; 221 dev->stats.tx_packets++;
215 dev->stats.tx_bytes += skb->len; 222 dev->stats.tx_bytes += skb->len;
216 atmvcc->send(atmvcc, skb); 223 atmvcc->send(atmvcc, skb);
224
225 if (!atm_may_send(atmvcc, 0)) {
226 netif_stop_queue(brvcc->device);
227 /*check for race with br2684_pop*/
228 if (atm_may_send(atmvcc, 0))
229 netif_start_queue(brvcc->device);
230 }
231
217 return 1; 232 return 1;
218} 233}
219 234
@@ -223,7 +238,8 @@ static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
223 return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */ 238 return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
224} 239}
225 240
226static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) 241static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
242 struct net_device *dev)
227{ 243{
228 struct br2684_dev *brdev = BRPRIV(dev); 244 struct br2684_dev *brdev = BRPRIV(dev);
229 struct br2684_vcc *brvcc; 245 struct br2684_vcc *brvcc;
@@ -503,8 +519,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
503 atmvcc->user_back = brvcc; 519 atmvcc->user_back = brvcc;
504 brvcc->encaps = (enum br2684_encaps)be.encaps; 520 brvcc->encaps = (enum br2684_encaps)be.encaps;
505 brvcc->old_push = atmvcc->push; 521 brvcc->old_push = atmvcc->push;
522 brvcc->old_pop = atmvcc->pop;
506 barrier(); 523 barrier();
507 atmvcc->push = br2684_push; 524 atmvcc->push = br2684_push;
525 atmvcc->pop = br2684_pop;
508 526
509 __skb_queue_head_init(&queue); 527 __skb_queue_head_init(&queue);
510 rq = &sk_atm(atmvcc)->sk_receive_queue; 528 rq = &sk_atm(atmvcc)->sk_receive_queue;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64910bb86089..64629c354343 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -267,7 +267,7 @@ static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
267 kfree_skb(skb); 267 kfree_skb(skb);
268} 268}
269 269
270static struct neigh_ops clip_neigh_ops = { 270static const struct neigh_ops clip_neigh_ops = {
271 .family = AF_INET, 271 .family = AF_INET,
272 .solicit = clip_neigh_solicit, 272 .solicit = clip_neigh_solicit,
273 .error_report = clip_neigh_error, 273 .error_report = clip_neigh_error,
@@ -360,7 +360,8 @@ static int clip_encap(struct atm_vcc *vcc, int mode)
360 return 0; 360 return 0;
361} 361}
362 362
363static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) 363static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
364 struct net_device *dev)
364{ 365{
365 struct clip_priv *clip_priv = PRIV(dev); 366 struct clip_priv *clip_priv = PRIV(dev);
366 struct atmarp_entry *entry; 367 struct atmarp_entry *entry;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 8e723c2654cb..b2d644560323 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -59,7 +59,8 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
59 */ 59 */
60 60
61static int lec_open(struct net_device *dev); 61static int lec_open(struct net_device *dev);
62static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); 62static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
63 struct net_device *dev);
63static int lec_close(struct net_device *dev); 64static int lec_close(struct net_device *dev);
64static void lec_init(struct net_device *dev); 65static void lec_init(struct net_device *dev);
65static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 66static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
@@ -247,7 +248,8 @@ static void lec_tx_timeout(struct net_device *dev)
247 netif_wake_queue(dev); 248 netif_wake_queue(dev);
248} 249}
249 250
250static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) 251static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
252 struct net_device *dev)
251{ 253{
252 struct sk_buff *skb2; 254 struct sk_buff *skb2;
253 struct lec_priv *priv = netdev_priv(dev); 255 struct lec_priv *priv = netdev_priv(dev);
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 1ac4b94bf626..38a6cb0863f0 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -73,7 +73,8 @@ static void mpoad_close(struct atm_vcc *vcc);
73static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb); 73static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
74 74
75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); 75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
76static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev); 76static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
77 struct net_device *dev);
77static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); 78static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev);
78static void mpc_timer_refresh(void); 79static void mpc_timer_refresh(void);
79static void mpc_cache_check( unsigned long checking_time ); 80static void mpc_cache_check( unsigned long checking_time );
@@ -528,7 +529,8 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
528/* 529/*
529 * Probably needs some error checks and locking, not sure... 530 * Probably needs some error checks and locking, not sure...
530 */ 531 */
531static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev) 532static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
533 struct net_device *dev)
532{ 534{
533 struct mpoa_client *mpc; 535 struct mpoa_client *mpc;
534 struct ethhdr *eth; 536 struct ethhdr *eth;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 59fdb1d2e8ed..ed371684c133 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -34,6 +34,7 @@ menuconfig BT
34config BT_L2CAP 34config BT_L2CAP
35 tristate "L2CAP protocol support" 35 tristate "L2CAP protocol support"
36 depends on BT 36 depends on BT
37 select CRC16
37 help 38 help
38 L2CAP (Logical Link Control and Adaptation Protocol) provides 39 L2CAP (Logical Link Control and Adaptation Protocol) provides
39 connection oriented and connection-less data transport. L2CAP 40 connection oriented and connection-less data transport. L2CAP
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 9c42990126a0..26fb831ef7e0 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -165,7 +165,8 @@ static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session
165} 165}
166#endif 166#endif
167 167
168static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) 168static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
169 struct net_device *dev)
169{ 170{
170 struct bnep_session *s = netdev_priv(dev); 171 struct bnep_session *s = netdev_priv(dev);
171 struct sock *sk = s->sock->sk; 172 struct sock *sk = s->sock->sk;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fa47d5d84f5c..a9750984f772 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -246,6 +246,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
246 if (hdev->notify) 246 if (hdev->notify)
247 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); 247 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
248 248
249 atomic_set(&conn->devref, 0);
250
249 hci_conn_init_sysfs(conn); 251 hci_conn_init_sysfs(conn);
250 252
251 tasklet_enable(&hdev->tx_task); 253 tasklet_enable(&hdev->tx_task);
@@ -288,7 +290,7 @@ int hci_conn_del(struct hci_conn *conn)
288 290
289 skb_queue_purge(&conn->data_q); 291 skb_queue_purge(&conn->data_q);
290 292
291 hci_conn_del_sysfs(conn); 293 hci_conn_put_device(conn);
292 294
293 hci_dev_put(hdev); 295 hci_dev_put(hdev);
294 296
@@ -583,6 +585,19 @@ void hci_conn_check_pending(struct hci_dev *hdev)
583 hci_dev_unlock(hdev); 585 hci_dev_unlock(hdev);
584} 586}
585 587
588void hci_conn_hold_device(struct hci_conn *conn)
589{
590 atomic_inc(&conn->devref);
591}
592EXPORT_SYMBOL(hci_conn_hold_device);
593
594void hci_conn_put_device(struct hci_conn *conn)
595{
596 if (atomic_dec_and_test(&conn->devref))
597 hci_conn_del_sysfs(conn);
598}
599EXPORT_SYMBOL(hci_conn_put_device);
600
586int hci_get_conn_list(void __user *arg) 601int hci_get_conn_list(void __user *arg)
587{ 602{
588 struct hci_conn_list_req req, *cl; 603 struct hci_conn_list_req req, *cl;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 406ad07cdea1..e1da8f68759c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -911,7 +911,7 @@ int hci_register_dev(struct hci_dev *hdev)
911 hdev->reassembly[i] = NULL; 911 hdev->reassembly[i] = NULL;
912 912
913 init_waitqueue_head(&hdev->req_wait_q); 913 init_waitqueue_head(&hdev->req_wait_q);
914 init_MUTEX(&hdev->req_lock); 914 mutex_init(&hdev->req_lock);
915 915
916 inquiry_cache_init(hdev); 916 inquiry_cache_init(hdev);
917 917
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 184ba0a88ec0..e99fe385fba2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -887,6 +887,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
887 } else 887 } else
888 conn->state = BT_CONNECTED; 888 conn->state = BT_CONNECTED;
889 889
890 hci_conn_hold_device(conn);
890 hci_conn_add_sysfs(conn); 891 hci_conn_add_sysfs(conn);
891 892
892 if (test_bit(HCI_AUTH, &hdev->flags)) 893 if (test_bit(HCI_AUTH, &hdev->flags))
@@ -1693,6 +1694,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1693 conn->handle = __le16_to_cpu(ev->handle); 1694 conn->handle = __le16_to_cpu(ev->handle);
1694 conn->state = BT_CONNECTED; 1695 conn->state = BT_CONNECTED;
1695 1696
1697 hci_conn_hold_device(conn);
1696 hci_conn_add_sysfs(conn); 1698 hci_conn_add_sysfs(conn);
1697 break; 1699 break;
1698 1700
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b18676870d55..09bedeb5579c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -40,6 +40,7 @@
40 40
41#include <linux/input.h> 41#include <linux/input.h>
42#include <linux/hid.h> 42#include <linux/hid.h>
43#include <linux/hidraw.h>
43 44
44#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
45#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
@@ -92,10 +93,14 @@ static void __hidp_link_session(struct hidp_session *session)
92{ 93{
93 __module_get(THIS_MODULE); 94 __module_get(THIS_MODULE);
94 list_add(&session->list, &hidp_session_list); 95 list_add(&session->list, &hidp_session_list);
96
97 hci_conn_hold_device(session->conn);
95} 98}
96 99
97static void __hidp_unlink_session(struct hidp_session *session) 100static void __hidp_unlink_session(struct hidp_session *session)
98{ 101{
102 hci_conn_put_device(session->conn);
103
99 list_del(&session->list); 104 list_del(&session->list);
100 module_put(THIS_MODULE); 105 module_put(THIS_MODULE);
101} 106}
@@ -374,6 +379,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
374 379
375 /* Kill session thread */ 380 /* Kill session thread */
376 atomic_inc(&session->terminate); 381 atomic_inc(&session->terminate);
382 hidp_schedule(session);
377 } 383 }
378} 384}
379 385
@@ -573,7 +579,11 @@ static int hidp_session(void *arg)
573 if (session->hid) { 579 if (session->hid) {
574 if (session->hid->claimed & HID_CLAIMED_INPUT) 580 if (session->hid->claimed & HID_CLAIMED_INPUT)
575 hidinput_disconnect(session->hid); 581 hidinput_disconnect(session->hid);
582 if (session->hid->claimed & HID_CLAIMED_HIDRAW)
583 hidraw_disconnect(session->hid);
584
576 hid_destroy_device(session->hid); 585 hid_destroy_device(session->hid);
586 session->hid = NULL;
577 } 587 }
578 588
579 /* Wakeup user-space polling for socket errors */ 589 /* Wakeup user-space polling for socket errors */
@@ -601,25 +611,27 @@ static struct device *hidp_get_device(struct hidp_session *session)
601{ 611{
602 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; 612 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
603 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; 613 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
614 struct device *device = NULL;
604 struct hci_dev *hdev; 615 struct hci_dev *hdev;
605 struct hci_conn *conn;
606 616
607 hdev = hci_get_route(dst, src); 617 hdev = hci_get_route(dst, src);
608 if (!hdev) 618 if (!hdev)
609 return NULL; 619 return NULL;
610 620
611 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 621 session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
622 if (session->conn)
623 device = &session->conn->dev;
612 624
613 hci_dev_put(hdev); 625 hci_dev_put(hdev);
614 626
615 return conn ? &conn->dev : NULL; 627 return device;
616} 628}
617 629
618static int hidp_setup_input(struct hidp_session *session, 630static int hidp_setup_input(struct hidp_session *session,
619 struct hidp_connadd_req *req) 631 struct hidp_connadd_req *req)
620{ 632{
621 struct input_dev *input; 633 struct input_dev *input;
622 int i; 634 int err, i;
623 635
624 input = input_allocate_device(); 636 input = input_allocate_device();
625 if (!input) 637 if (!input)
@@ -666,7 +678,13 @@ static int hidp_setup_input(struct hidp_session *session,
666 678
667 input->event = hidp_input_event; 679 input->event = hidp_input_event;
668 680
669 return input_register_device(input); 681 err = input_register_device(input);
682 if (err < 0) {
683 hci_conn_put_device(session->conn);
684 return err;
685 }
686
687 return 0;
670} 688}
671 689
672static int hidp_open(struct hid_device *hid) 690static int hidp_open(struct hid_device *hid)
@@ -748,13 +766,11 @@ static int hidp_setup_hid(struct hidp_session *session,
748{ 766{
749 struct hid_device *hid; 767 struct hid_device *hid;
750 bdaddr_t src, dst; 768 bdaddr_t src, dst;
751 int ret; 769 int err;
752 770
753 hid = hid_allocate_device(); 771 hid = hid_allocate_device();
754 if (IS_ERR(hid)) { 772 if (IS_ERR(hid))
755 ret = PTR_ERR(session->hid); 773 return PTR_ERR(session->hid);
756 goto err;
757 }
758 774
759 session->hid = hid; 775 session->hid = hid;
760 session->req = req; 776 session->req = req;
@@ -776,16 +792,17 @@ static int hidp_setup_hid(struct hidp_session *session,
776 hid->dev.parent = hidp_get_device(session); 792 hid->dev.parent = hidp_get_device(session);
777 hid->ll_driver = &hidp_hid_driver; 793 hid->ll_driver = &hidp_hid_driver;
778 794
779 ret = hid_add_device(hid); 795 err = hid_add_device(hid);
780 if (ret) 796 if (err < 0)
781 goto err_hid; 797 goto failed;
782 798
783 return 0; 799 return 0;
784err_hid: 800
801failed:
785 hid_destroy_device(hid); 802 hid_destroy_device(hid);
786 session->hid = NULL; 803 session->hid = NULL;
787err: 804
788 return ret; 805 return err;
789} 806}
790 807
791int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) 808int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
@@ -835,13 +852,13 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
835 if (req->rd_size > 0) { 852 if (req->rd_size > 0) {
836 err = hidp_setup_hid(session, req); 853 err = hidp_setup_hid(session, req);
837 if (err && err != -ENODEV) 854 if (err && err != -ENODEV)
838 goto err_skb; 855 goto purge;
839 } 856 }
840 857
841 if (!session->hid) { 858 if (!session->hid) {
842 err = hidp_setup_input(session, req); 859 err = hidp_setup_input(session, req);
843 if (err < 0) 860 if (err < 0)
844 goto err_skb; 861 goto purge;
845 } 862 }
846 863
847 __hidp_link_session(session); 864 __hidp_link_session(session);
@@ -869,13 +886,20 @@ unlink:
869 886
870 __hidp_unlink_session(session); 887 __hidp_unlink_session(session);
871 888
872 if (session->input) 889 if (session->input) {
873 input_unregister_device(session->input); 890 input_unregister_device(session->input);
874 if (session->hid) 891 session->input = NULL;
892 }
893
894 if (session->hid) {
875 hid_destroy_device(session->hid); 895 hid_destroy_device(session->hid);
876err_skb: 896 session->hid = NULL;
897 }
898
899purge:
877 skb_queue_purge(&session->ctrl_transmit); 900 skb_queue_purge(&session->ctrl_transmit);
878 skb_queue_purge(&session->intr_transmit); 901 skb_queue_purge(&session->intr_transmit);
902
879failed: 903failed:
880 up_write(&hidp_session_sem); 904 up_write(&hidp_session_sem);
881 905
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index e503c89057ad..faf3d74c3586 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -126,6 +126,8 @@ int hidp_get_conninfo(struct hidp_conninfo *ci);
126struct hidp_session { 126struct hidp_session {
127 struct list_head list; 127 struct list_head list;
128 128
129 struct hci_conn *conn;
130
129 struct socket *ctrl_sock; 131 struct socket *ctrl_sock;
130 struct socket *intr_sock; 132 struct socket *intr_sock;
131 133
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index bd0a4c1bced0..b03012564647 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -41,6 +41,7 @@
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/uaccess.h> 43#include <linux/uaccess.h>
44#include <linux/crc16.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -50,7 +51,9 @@
50#include <net/bluetooth/hci_core.h> 51#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/l2cap.h> 52#include <net/bluetooth/l2cap.h>
52 53
53#define VERSION "2.13" 54#define VERSION "2.14"
55
56static int enable_ertm = 0;
54 57
55static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 58static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
56static u8 l2cap_fixed_chan[8] = { 0x02, }; 59static u8 l2cap_fixed_chan[8] = { 0x02, };
@@ -331,6 +334,48 @@ static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16
331 return hci_send_acl(conn->hcon, skb, 0); 334 return hci_send_acl(conn->hcon, skb, 0);
332} 335}
333 336
337static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
338{
339 struct sk_buff *skb;
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
343
344 if (pi->fcs == L2CAP_FCS_CRC16)
345 hlen += 2;
346
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
348
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
351
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
353 if (!skb)
354 return -ENOMEM;
355
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
360
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
364 }
365
366 return hci_send_acl(pi->conn->hcon, skb, 0);
367}
368
369static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
370{
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
375
376 return l2cap_send_sframe(pi, control);
377}
378
334static void l2cap_do_start(struct sock *sk) 379static void l2cap_do_start(struct sock *sk)
335{ 380{
336 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 381 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -364,6 +409,16 @@ static void l2cap_do_start(struct sock *sk)
364 } 409 }
365} 410}
366 411
412static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
413{
414 struct l2cap_disconn_req req;
415
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
420}
421
367/* ---- L2CAP connections ---- */ 422/* ---- L2CAP connections ---- */
368static void l2cap_conn_start(struct l2cap_conn *conn) 423static void l2cap_conn_start(struct l2cap_conn *conn)
369{ 424{
@@ -648,15 +703,10 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
648 case BT_CONFIG: 703 case BT_CONFIG:
649 if (sk->sk_type == SOCK_SEQPACKET) { 704 if (sk->sk_type == SOCK_SEQPACKET) {
650 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 705 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
651 struct l2cap_disconn_req req;
652 706
653 sk->sk_state = BT_DISCONN; 707 sk->sk_state = BT_DISCONN;
654 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 708 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
655 709 l2cap_send_disconn_req(conn, sk);
656 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
657 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
658 l2cap_send_cmd(conn, l2cap_get_ident(conn),
659 L2CAP_DISCONN_REQ, sizeof(req), &req);
660 } else 710 } else
661 l2cap_chan_del(sk, reason); 711 l2cap_chan_del(sk, reason);
662 break; 712 break;
@@ -715,12 +765,16 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
715 765
716 pi->imtu = l2cap_pi(parent)->imtu; 766 pi->imtu = l2cap_pi(parent)->imtu;
717 pi->omtu = l2cap_pi(parent)->omtu; 767 pi->omtu = l2cap_pi(parent)->omtu;
768 pi->mode = l2cap_pi(parent)->mode;
769 pi->fcs = l2cap_pi(parent)->fcs;
718 pi->sec_level = l2cap_pi(parent)->sec_level; 770 pi->sec_level = l2cap_pi(parent)->sec_level;
719 pi->role_switch = l2cap_pi(parent)->role_switch; 771 pi->role_switch = l2cap_pi(parent)->role_switch;
720 pi->force_reliable = l2cap_pi(parent)->force_reliable; 772 pi->force_reliable = l2cap_pi(parent)->force_reliable;
721 } else { 773 } else {
722 pi->imtu = L2CAP_DEFAULT_MTU; 774 pi->imtu = L2CAP_DEFAULT_MTU;
723 pi->omtu = 0; 775 pi->omtu = 0;
776 pi->mode = L2CAP_MODE_BASIC;
777 pi->fcs = L2CAP_FCS_CRC16;
724 pi->sec_level = BT_SECURITY_LOW; 778 pi->sec_level = BT_SECURITY_LOW;
725 pi->role_switch = 0; 779 pi->role_switch = 0;
726 pi->force_reliable = 0; 780 pi->force_reliable = 0;
@@ -956,6 +1010,19 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
956 goto done; 1010 goto done;
957 } 1011 }
958 1012
1013 switch (l2cap_pi(sk)->mode) {
1014 case L2CAP_MODE_BASIC:
1015 break;
1016 case L2CAP_MODE_ERTM:
1017 case L2CAP_MODE_STREAMING:
1018 if (enable_ertm)
1019 break;
1020 /* fall through */
1021 default:
1022 err = -ENOTSUPP;
1023 goto done;
1024 }
1025
959 switch (sk->sk_state) { 1026 switch (sk->sk_state) {
960 case BT_CONNECT: 1027 case BT_CONNECT:
961 case BT_CONNECT2: 1028 case BT_CONNECT2:
@@ -1007,6 +1074,19 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
1007 goto done; 1074 goto done;
1008 } 1075 }
1009 1076
1077 switch (l2cap_pi(sk)->mode) {
1078 case L2CAP_MODE_BASIC:
1079 break;
1080 case L2CAP_MODE_ERTM:
1081 case L2CAP_MODE_STREAMING:
1082 if (enable_ertm)
1083 break;
1084 /* fall through */
1085 default:
1086 err = -ENOTSUPP;
1087 goto done;
1088 }
1089
1010 if (!l2cap_pi(sk)->psm) { 1090 if (!l2cap_pi(sk)->psm) {
1011 bdaddr_t *src = &bt_sk(sk)->src; 1091 bdaddr_t *src = &bt_sk(sk)->src;
1012 u16 psm; 1092 u16 psm;
@@ -1117,39 +1197,219 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
1117 return 0; 1197 return 0;
1118} 1198}
1119 1199
1120static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len) 1200static void l2cap_monitor_timeout(unsigned long arg)
1121{ 1201{
1122 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1202 struct sock *sk = (void *) arg;
1123 struct sk_buff *skb, **frag; 1203 u16 control;
1124 int err, hlen, count, sent = 0;
1125 struct l2cap_hdr *lh;
1126 1204
1127 BT_DBG("sk %p len %d", sk, len); 1205 bh_lock_sock(sk);
1206 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1207 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1208 return;
1209 }
1128 1210
1129 /* First fragment (with L2CAP header) */ 1211 l2cap_pi(sk)->retry_count++;
1130 if (sk->sk_type == SOCK_DGRAM) 1212 __mod_monitor_timer();
1131 hlen = L2CAP_HDR_SIZE + 2;
1132 else
1133 hlen = L2CAP_HDR_SIZE;
1134 1213
1135 count = min_t(unsigned int, (conn->mtu - hlen), len); 1214 control = L2CAP_CTRL_POLL;
1215 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1216 bh_unlock_sock(sk);
1217}
1136 1218
1137 skb = bt_skb_send_alloc(sk, hlen + count, 1219static void l2cap_retrans_timeout(unsigned long arg)
1138 msg->msg_flags & MSG_DONTWAIT, &err); 1220{
1139 if (!skb) 1221 struct sock *sk = (void *) arg;
1140 return err; 1222 u16 control;
1141 1223
1142 /* Create L2CAP header */ 1224 bh_lock_sock(sk);
1143 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1225 l2cap_pi(sk)->retry_count = 1;
1144 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1226 __mod_monitor_timer();
1145 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1227
1228 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1229
1230 control = L2CAP_CTRL_POLL;
1231 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1232 bh_unlock_sock(sk);
1233}
1234
1235static void l2cap_drop_acked_frames(struct sock *sk)
1236{
1237 struct sk_buff *skb;
1238
1239 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1240 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1241 break;
1242
1243 skb = skb_dequeue(TX_QUEUE(sk));
1244 kfree_skb(skb);
1245
1246 l2cap_pi(sk)->unacked_frames--;
1247 }
1248
1249 if (!l2cap_pi(sk)->unacked_frames)
1250 del_timer(&l2cap_pi(sk)->retrans_timer);
1146 1251
1147 if (sk->sk_type == SOCK_DGRAM) 1252 return;
1148 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2)); 1253}
1254
1255static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1256{
1257 struct l2cap_pinfo *pi = l2cap_pi(sk);
1258 int err;
1259
1260 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1261
1262 err = hci_send_acl(pi->conn->hcon, skb, 0);
1263 if (err < 0)
1264 kfree_skb(skb);
1265
1266 return err;
1267}
1268
1269static int l2cap_streaming_send(struct sock *sk)
1270{
1271 struct sk_buff *skb, *tx_skb;
1272 struct l2cap_pinfo *pi = l2cap_pi(sk);
1273 u16 control, fcs;
1274 int err;
1275
1276 while ((skb = sk->sk_send_head)) {
1277 tx_skb = skb_clone(skb, GFP_ATOMIC);
1278
1279 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1280 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1281 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1282
1283 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1284 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1285 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1286 }
1287
1288 err = l2cap_do_send(sk, tx_skb);
1289 if (err < 0) {
1290 l2cap_send_disconn_req(pi->conn, sk);
1291 return err;
1292 }
1293
1294 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1295
1296 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1297 sk->sk_send_head = NULL;
1298 else
1299 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1300
1301 skb = skb_dequeue(TX_QUEUE(sk));
1302 kfree_skb(skb);
1303 }
1304 return 0;
1305}
1306
1307static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1308{
1309 struct l2cap_pinfo *pi = l2cap_pi(sk);
1310 struct sk_buff *skb, *tx_skb;
1311 u16 control, fcs;
1312 int err;
1313
1314 skb = skb_peek(TX_QUEUE(sk));
1315 do {
1316 if (bt_cb(skb)->tx_seq != tx_seq) {
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 break;
1319 skb = skb_queue_next(TX_QUEUE(sk), skb);
1320 continue;
1321 }
1322
1323 if (pi->remote_max_tx &&
1324 bt_cb(skb)->retries == pi->remote_max_tx) {
1325 l2cap_send_disconn_req(pi->conn, sk);
1326 break;
1327 }
1328
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1331 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1332 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1333 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1334 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1335
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1337 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1338 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1339 }
1340
1341 err = l2cap_do_send(sk, tx_skb);
1342 if (err < 0) {
1343 l2cap_send_disconn_req(pi->conn, sk);
1344 return err;
1345 }
1346 break;
1347 } while(1);
1348 return 0;
1349}
1350
1351static int l2cap_ertm_send(struct sock *sk)
1352{
1353 struct sk_buff *skb, *tx_skb;
1354 struct l2cap_pinfo *pi = l2cap_pi(sk);
1355 u16 control, fcs;
1356 int err;
1357
1358 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1359 return 0;
1360
1361 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1362 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1363 tx_skb = skb_clone(skb, GFP_ATOMIC);
1364
1365 if (pi->remote_max_tx &&
1366 bt_cb(skb)->retries == pi->remote_max_tx) {
1367 l2cap_send_disconn_req(pi->conn, sk);
1368 break;
1369 }
1370
1371 bt_cb(skb)->retries++;
1372
1373 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1374 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1375 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1376 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1377
1378
1379 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1380 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1381 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1382 }
1383
1384 err = l2cap_do_send(sk, tx_skb);
1385 if (err < 0) {
1386 l2cap_send_disconn_req(pi->conn, sk);
1387 return err;
1388 }
1389 __mod_retrans_timer();
1390
1391 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1393
1394 pi->unacked_frames++;
1395
1396 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1397 sk->sk_send_head = NULL;
1398 else
1399 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1400 }
1401
1402 return 0;
1403}
1404
1405static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1406{
1407 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1408 struct sk_buff **frag;
1409 int err, sent = 0;
1149 1410
1150 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 1411 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1151 err = -EFAULT; 1412 return -EFAULT;
1152 goto fail;
1153 } 1413 }
1154 1414
1155 sent += count; 1415 sent += count;
@@ -1162,33 +1422,173 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1162 1422
1163 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); 1423 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1164 if (!*frag) 1424 if (!*frag)
1165 goto fail; 1425 return -EFAULT;
1166 1426 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1167 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) { 1427 return -EFAULT;
1168 err = -EFAULT;
1169 goto fail;
1170 }
1171 1428
1172 sent += count; 1429 sent += count;
1173 len -= count; 1430 len -= count;
1174 1431
1175 frag = &(*frag)->next; 1432 frag = &(*frag)->next;
1176 } 1433 }
1177 err = hci_send_acl(conn->hcon, skb, 0);
1178 if (err < 0)
1179 goto fail;
1180 1434
1181 return sent; 1435 return sent;
1436}
1182 1437
1183fail: 1438static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1184 kfree_skb(skb); 1439{
1185 return err; 1440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1441 struct sk_buff *skb;
1442 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1443 struct l2cap_hdr *lh;
1444
1445 BT_DBG("sk %p len %d", sk, (int)len);
1446
1447 count = min_t(unsigned int, (conn->mtu - hlen), len);
1448 skb = bt_skb_send_alloc(sk, count + hlen,
1449 msg->msg_flags & MSG_DONTWAIT, &err);
1450 if (!skb)
1451 return ERR_PTR(-ENOMEM);
1452
1453 /* Create L2CAP header */
1454 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1455 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1456 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1457 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1458
1459 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1460 if (unlikely(err < 0)) {
1461 kfree_skb(skb);
1462 return ERR_PTR(err);
1463 }
1464 return skb;
1465}
1466
1467static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1468{
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff *skb;
1471 int err, count, hlen = L2CAP_HDR_SIZE;
1472 struct l2cap_hdr *lh;
1473
1474 BT_DBG("sk %p len %d", sk, (int)len);
1475
1476 count = min_t(unsigned int, (conn->mtu - hlen), len);
1477 skb = bt_skb_send_alloc(sk, count + hlen,
1478 msg->msg_flags & MSG_DONTWAIT, &err);
1479 if (!skb)
1480 return ERR_PTR(-ENOMEM);
1481
1482 /* Create L2CAP header */
1483 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1484 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1485 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1486
1487 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1488 if (unlikely(err < 0)) {
1489 kfree_skb(skb);
1490 return ERR_PTR(err);
1491 }
1492 return skb;
1493}
1494
1495static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1496{
1497 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1498 struct sk_buff *skb;
1499 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1500 struct l2cap_hdr *lh;
1501
1502 BT_DBG("sk %p len %d", sk, (int)len);
1503
1504 if (sdulen)
1505 hlen += 2;
1506
1507 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1508 hlen += 2;
1509
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1513 if (!skb)
1514 return ERR_PTR(-ENOMEM);
1515
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 put_unaligned_le16(control, skb_put(skb, 2));
1521 if (sdulen)
1522 put_unaligned_le16(sdulen, skb_put(skb, 2));
1523
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1526 kfree_skb(skb);
1527 return ERR_PTR(err);
1528 }
1529
1530 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1531 put_unaligned_le16(0, skb_put(skb, 2));
1532
1533 bt_cb(skb)->retries = 0;
1534 return skb;
1535}
1536
1537static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1538{
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1540 struct sk_buff *skb;
1541 struct sk_buff_head sar_queue;
1542 u16 control;
1543 size_t size = 0;
1544
1545 __skb_queue_head_init(&sar_queue);
1546 control = L2CAP_SDU_START;
1547 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1548 if (IS_ERR(skb))
1549 return PTR_ERR(skb);
1550
1551 __skb_queue_tail(&sar_queue, skb);
1552 len -= pi->max_pdu_size;
1553 size +=pi->max_pdu_size;
1554 control = 0;
1555
1556 while (len > 0) {
1557 size_t buflen;
1558
1559 if (len > pi->max_pdu_size) {
1560 control |= L2CAP_SDU_CONTINUE;
1561 buflen = pi->max_pdu_size;
1562 } else {
1563 control |= L2CAP_SDU_END;
1564 buflen = len;
1565 }
1566
1567 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1568 if (IS_ERR(skb)) {
1569 skb_queue_purge(&sar_queue);
1570 return PTR_ERR(skb);
1571 }
1572
1573 __skb_queue_tail(&sar_queue, skb);
1574 len -= buflen;
1575 size += buflen;
1576 control = 0;
1577 }
1578 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1579 if (sk->sk_send_head == NULL)
1580 sk->sk_send_head = sar_queue.next;
1581
1582 return size;
1186} 1583}
1187 1584
1188static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) 1585static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1189{ 1586{
1190 struct sock *sk = sock->sk; 1587 struct sock *sk = sock->sk;
1191 int err = 0; 1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct sk_buff *skb;
1590 u16 control;
1591 int err;
1192 1592
1193 BT_DBG("sock %p, sk %p", sock, sk); 1593 BT_DBG("sock %p, sk %p", sock, sk);
1194 1594
@@ -1200,16 +1600,73 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1200 return -EOPNOTSUPP; 1600 return -EOPNOTSUPP;
1201 1601
1202 /* Check outgoing MTU */ 1602 /* Check outgoing MTU */
1203 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu) 1603 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1604 && len > pi->omtu)
1204 return -EINVAL; 1605 return -EINVAL;
1205 1606
1206 lock_sock(sk); 1607 lock_sock(sk);
1207 1608
1208 if (sk->sk_state == BT_CONNECTED) 1609 if (sk->sk_state != BT_CONNECTED) {
1209 err = l2cap_do_send(sk, msg, len);
1210 else
1211 err = -ENOTCONN; 1610 err = -ENOTCONN;
1611 goto done;
1612 }
1613
1614 /* Connectionless channel */
1615 if (sk->sk_type == SOCK_DGRAM) {
1616 skb = l2cap_create_connless_pdu(sk, msg, len);
1617 err = l2cap_do_send(sk, skb);
1618 goto done;
1619 }
1620
1621 switch (pi->mode) {
1622 case L2CAP_MODE_BASIC:
1623 /* Create a basic PDU */
1624 skb = l2cap_create_basic_pdu(sk, msg, len);
1625 if (IS_ERR(skb)) {
1626 err = PTR_ERR(skb);
1627 goto done;
1628 }
1629
1630 err = l2cap_do_send(sk, skb);
1631 if (!err)
1632 err = len;
1633 break;
1634
1635 case L2CAP_MODE_ERTM:
1636 case L2CAP_MODE_STREAMING:
1637 /* Entire SDU fits into one PDU */
1638 if (len <= pi->max_pdu_size) {
1639 control = L2CAP_SDU_UNSEGMENTED;
1640 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1641 if (IS_ERR(skb)) {
1642 err = PTR_ERR(skb);
1643 goto done;
1644 }
1645 __skb_queue_tail(TX_QUEUE(sk), skb);
1646 if (sk->sk_send_head == NULL)
1647 sk->sk_send_head = skb;
1648 } else {
1649 /* Segment SDU into multiples PDUs */
1650 err = l2cap_sar_segment_sdu(sk, msg, len);
1651 if (err < 0)
1652 goto done;
1653 }
1654
1655 if (pi->mode == L2CAP_MODE_STREAMING)
1656 err = l2cap_streaming_send(sk);
1657 else
1658 err = l2cap_ertm_send(sk);
1659
1660 if (!err)
1661 err = len;
1662 break;
1663
1664 default:
1665 BT_DBG("bad state %1.1x", pi->mode);
1666 err = -EINVAL;
1667 }
1212 1668
1669done:
1213 release_sock(sk); 1670 release_sock(sk);
1214 return err; 1671 return err;
1215} 1672}
@@ -1257,7 +1714,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1257 opts.imtu = l2cap_pi(sk)->imtu; 1714 opts.imtu = l2cap_pi(sk)->imtu;
1258 opts.omtu = l2cap_pi(sk)->omtu; 1715 opts.omtu = l2cap_pi(sk)->omtu;
1259 opts.flush_to = l2cap_pi(sk)->flush_to; 1716 opts.flush_to = l2cap_pi(sk)->flush_to;
1260 opts.mode = L2CAP_MODE_BASIC; 1717 opts.mode = l2cap_pi(sk)->mode;
1718 opts.fcs = l2cap_pi(sk)->fcs;
1261 1719
1262 len = min_t(unsigned int, sizeof(opts), optlen); 1720 len = min_t(unsigned int, sizeof(opts), optlen);
1263 if (copy_from_user((char *) &opts, optval, len)) { 1721 if (copy_from_user((char *) &opts, optval, len)) {
@@ -1265,8 +1723,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1265 break; 1723 break;
1266 } 1724 }
1267 1725
1268 l2cap_pi(sk)->imtu = opts.imtu; 1726 l2cap_pi(sk)->imtu = opts.imtu;
1269 l2cap_pi(sk)->omtu = opts.omtu; 1727 l2cap_pi(sk)->omtu = opts.omtu;
1728 l2cap_pi(sk)->mode = opts.mode;
1729 l2cap_pi(sk)->fcs = opts.fcs;
1270 break; 1730 break;
1271 1731
1272 case L2CAP_LM: 1732 case L2CAP_LM:
@@ -1379,7 +1839,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
1379 opts.imtu = l2cap_pi(sk)->imtu; 1839 opts.imtu = l2cap_pi(sk)->imtu;
1380 opts.omtu = l2cap_pi(sk)->omtu; 1840 opts.omtu = l2cap_pi(sk)->omtu;
1381 opts.flush_to = l2cap_pi(sk)->flush_to; 1841 opts.flush_to = l2cap_pi(sk)->flush_to;
1382 opts.mode = L2CAP_MODE_BASIC; 1842 opts.mode = l2cap_pi(sk)->mode;
1843 opts.fcs = l2cap_pi(sk)->fcs;
1383 1844
1384 len = min_t(unsigned int, len, sizeof(opts)); 1845 len = min_t(unsigned int, len, sizeof(opts));
1385 if (copy_to_user(optval, (char *) &opts, len)) 1846 if (copy_to_user(optval, (char *) &opts, len))
@@ -1708,16 +2169,108 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1708 *ptr += L2CAP_CONF_OPT_SIZE + len; 2169 *ptr += L2CAP_CONF_OPT_SIZE + len;
1709} 2170}
1710 2171
2172static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2173{
2174 u32 local_feat_mask = l2cap_feat_mask;
2175 if (enable_ertm)
2176 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2177
2178 switch (mode) {
2179 case L2CAP_MODE_ERTM:
2180 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2181 case L2CAP_MODE_STREAMING:
2182 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2183 default:
2184 return 0x00;
2185 }
2186}
2187
2188static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2189{
2190 switch (mode) {
2191 case L2CAP_MODE_STREAMING:
2192 case L2CAP_MODE_ERTM:
2193 if (l2cap_mode_supported(mode, remote_feat_mask))
2194 return mode;
2195 /* fall through */
2196 default:
2197 return L2CAP_MODE_BASIC;
2198 }
2199}
2200
1711static int l2cap_build_conf_req(struct sock *sk, void *data) 2201static int l2cap_build_conf_req(struct sock *sk, void *data)
1712{ 2202{
1713 struct l2cap_pinfo *pi = l2cap_pi(sk); 2203 struct l2cap_pinfo *pi = l2cap_pi(sk);
1714 struct l2cap_conf_req *req = data; 2204 struct l2cap_conf_req *req = data;
2205 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
1715 void *ptr = req->data; 2206 void *ptr = req->data;
1716 2207
1717 BT_DBG("sk %p", sk); 2208 BT_DBG("sk %p", sk);
1718 2209
1719 if (pi->imtu != L2CAP_DEFAULT_MTU) 2210 if (pi->num_conf_req || pi->num_conf_rsp)
1720 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2211 goto done;
2212
2213 switch (pi->mode) {
2214 case L2CAP_MODE_STREAMING:
2215 case L2CAP_MODE_ERTM:
2216 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2217 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2218 l2cap_send_disconn_req(pi->conn, sk);
2219 break;
2220 default:
2221 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2222 break;
2223 }
2224
2225done:
2226 switch (pi->mode) {
2227 case L2CAP_MODE_BASIC:
2228 if (pi->imtu != L2CAP_DEFAULT_MTU)
2229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2230 break;
2231
2232 case L2CAP_MODE_ERTM:
2233 rfc.mode = L2CAP_MODE_ERTM;
2234 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2235 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
2236 rfc.retrans_timeout = 0;
2237 rfc.monitor_timeout = 0;
2238 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2239
2240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2241 sizeof(rfc), (unsigned long) &rfc);
2242
2243 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2244 break;
2245
2246 if (pi->fcs == L2CAP_FCS_NONE ||
2247 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2248 pi->fcs = L2CAP_FCS_NONE;
2249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2250 }
2251 break;
2252
2253 case L2CAP_MODE_STREAMING:
2254 rfc.mode = L2CAP_MODE_STREAMING;
2255 rfc.txwin_size = 0;
2256 rfc.max_transmit = 0;
2257 rfc.retrans_timeout = 0;
2258 rfc.monitor_timeout = 0;
2259 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2260
2261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2262 sizeof(rfc), (unsigned long) &rfc);
2263
2264 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2265 break;
2266
2267 if (pi->fcs == L2CAP_FCS_NONE ||
2268 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2269 pi->fcs = L2CAP_FCS_NONE;
2270 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2271 }
2272 break;
2273 }
1721 2274
1722 /* FIXME: Need actual value of the flush timeout */ 2275 /* FIXME: Need actual value of the flush timeout */
1723 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) 2276 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
@@ -1767,6 +2320,12 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1767 memcpy(&rfc, (void *) val, olen); 2320 memcpy(&rfc, (void *) val, olen);
1768 break; 2321 break;
1769 2322
2323 case L2CAP_CONF_FCS:
2324 if (val == L2CAP_FCS_NONE)
2325 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2326
2327 break;
2328
1770 default: 2329 default:
1771 if (hint) 2330 if (hint)
1772 break; 2331 break;
@@ -1777,30 +2336,83 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1777 } 2336 }
1778 } 2337 }
1779 2338
2339 if (pi->num_conf_rsp || pi->num_conf_req)
2340 goto done;
2341
2342 switch (pi->mode) {
2343 case L2CAP_MODE_STREAMING:
2344 case L2CAP_MODE_ERTM:
2345 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2346 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2347 return -ECONNREFUSED;
2348 break;
2349 default:
2350 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2351 break;
2352 }
2353
2354done:
2355 if (pi->mode != rfc.mode) {
2356 result = L2CAP_CONF_UNACCEPT;
2357 rfc.mode = pi->mode;
2358
2359 if (pi->num_conf_rsp == 1)
2360 return -ECONNREFUSED;
2361
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2363 sizeof(rfc), (unsigned long) &rfc);
2364 }
2365
2366
1780 if (result == L2CAP_CONF_SUCCESS) { 2367 if (result == L2CAP_CONF_SUCCESS) {
1781 /* Configure output options and let the other side know 2368 /* Configure output options and let the other side know
1782 * which ones we don't like. */ 2369 * which ones we don't like. */
1783 2370
1784 if (rfc.mode == L2CAP_MODE_BASIC) { 2371 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1785 if (mtu < pi->omtu) 2372 result = L2CAP_CONF_UNACCEPT;
1786 result = L2CAP_CONF_UNACCEPT; 2373 else {
1787 else { 2374 pi->omtu = mtu;
1788 pi->omtu = mtu; 2375 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1789 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2376 }
1790 } 2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1791 2378
1792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 2379 switch (rfc.mode) {
1793 } else { 2380 case L2CAP_MODE_BASIC:
2381 pi->fcs = L2CAP_FCS_NONE;
2382 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2383 break;
2384
2385 case L2CAP_MODE_ERTM:
2386 pi->remote_tx_win = rfc.txwin_size;
2387 pi->remote_max_tx = rfc.max_transmit;
2388 pi->max_pdu_size = rfc.max_pdu_size;
2389
2390 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2391 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2392
2393 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2394 break;
2395
2396 case L2CAP_MODE_STREAMING:
2397 pi->remote_tx_win = rfc.txwin_size;
2398 pi->max_pdu_size = rfc.max_pdu_size;
2399
2400 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2401 break;
2402
2403 default:
1794 result = L2CAP_CONF_UNACCEPT; 2404 result = L2CAP_CONF_UNACCEPT;
1795 2405
1796 memset(&rfc, 0, sizeof(rfc)); 2406 memset(&rfc, 0, sizeof(rfc));
1797 rfc.mode = L2CAP_MODE_BASIC; 2407 rfc.mode = pi->mode;
1798
1799 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1800 sizeof(rfc), (unsigned long) &rfc);
1801 } 2408 }
1802 }
1803 2409
2410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2411 sizeof(rfc), (unsigned long) &rfc);
2412
2413 if (result == L2CAP_CONF_SUCCESS)
2414 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2415 }
1804 rsp->scid = cpu_to_le16(pi->dcid); 2416 rsp->scid = cpu_to_le16(pi->dcid);
1805 rsp->result = cpu_to_le16(result); 2417 rsp->result = cpu_to_le16(result);
1806 rsp->flags = cpu_to_le16(0x0000); 2418 rsp->flags = cpu_to_le16(0x0000);
@@ -1808,6 +2420,73 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1808 return ptr - data; 2420 return ptr - data;
1809} 2421}
1810 2422
2423static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2424{
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2426 struct l2cap_conf_req *req = data;
2427 void *ptr = req->data;
2428 int type, olen;
2429 unsigned long val;
2430 struct l2cap_conf_rfc rfc;
2431
2432 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2433
2434 while (len >= L2CAP_CONF_OPT_SIZE) {
2435 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2436
2437 switch (type) {
2438 case L2CAP_CONF_MTU:
2439 if (val < L2CAP_DEFAULT_MIN_MTU) {
2440 *result = L2CAP_CONF_UNACCEPT;
2441 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2442 } else
2443 pi->omtu = val;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2445 break;
2446
2447 case L2CAP_CONF_FLUSH_TO:
2448 pi->flush_to = val;
2449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2450 2, pi->flush_to);
2451 break;
2452
2453 case L2CAP_CONF_RFC:
2454 if (olen == sizeof(rfc))
2455 memcpy(&rfc, (void *)val, olen);
2456
2457 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2458 rfc.mode != pi->mode)
2459 return -ECONNREFUSED;
2460
2461 pi->mode = rfc.mode;
2462 pi->fcs = 0;
2463
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2465 sizeof(rfc), (unsigned long) &rfc);
2466 break;
2467 }
2468 }
2469
2470 if (*result == L2CAP_CONF_SUCCESS) {
2471 switch (rfc.mode) {
2472 case L2CAP_MODE_ERTM:
2473 pi->remote_tx_win = rfc.txwin_size;
2474 pi->retrans_timeout = rfc.retrans_timeout;
2475 pi->monitor_timeout = rfc.monitor_timeout;
2476 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2477 break;
2478 case L2CAP_MODE_STREAMING:
2479 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2480 break;
2481 }
2482 }
2483
2484 req->dcid = cpu_to_le16(pi->dcid);
2485 req->flags = cpu_to_le16(0x0000);
2486
2487 return ptr - data;
2488}
2489
1811static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags) 2490static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1812{ 2491{
1813 struct l2cap_conf_rsp *rsp = data; 2492 struct l2cap_conf_rsp *rsp = data;
@@ -1994,6 +2673,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
1994 2673
1995 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1996 l2cap_build_conf_req(sk, req), req); 2675 l2cap_build_conf_req(sk, req), req);
2676 l2cap_pi(sk)->num_conf_req++;
1997 break; 2677 break;
1998 2678
1999 case L2CAP_CR_PEND: 2679 case L2CAP_CR_PEND:
@@ -2052,10 +2732,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2052 2732
2053 /* Complete config. */ 2733 /* Complete config. */
2054 len = l2cap_parse_conf_req(sk, rsp); 2734 len = l2cap_parse_conf_req(sk, rsp);
2055 if (len < 0) 2735 if (len < 0) {
2736 l2cap_send_disconn_req(conn, sk);
2056 goto unlock; 2737 goto unlock;
2738 }
2057 2739
2058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 2740 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2741 l2cap_pi(sk)->num_conf_rsp++;
2059 2742
2060 /* Reset config buffer. */ 2743 /* Reset config buffer. */
2061 l2cap_pi(sk)->conf_len = 0; 2744 l2cap_pi(sk)->conf_len = 0;
@@ -2064,7 +2747,22 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2064 goto unlock; 2747 goto unlock;
2065 2748
2066 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 2749 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2750 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2751 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2752 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2753
2067 sk->sk_state = BT_CONNECTED; 2754 sk->sk_state = BT_CONNECTED;
2755 l2cap_pi(sk)->next_tx_seq = 0;
2756 l2cap_pi(sk)->expected_ack_seq = 0;
2757 l2cap_pi(sk)->unacked_frames = 0;
2758
2759 setup_timer(&l2cap_pi(sk)->retrans_timer,
2760 l2cap_retrans_timeout, (unsigned long) sk);
2761 setup_timer(&l2cap_pi(sk)->monitor_timer,
2762 l2cap_monitor_timeout, (unsigned long) sk);
2763
2764 __skb_queue_head_init(TX_QUEUE(sk));
2765 __skb_queue_head_init(SREJ_QUEUE(sk));
2068 l2cap_chan_ready(sk); 2766 l2cap_chan_ready(sk);
2069 goto unlock; 2767 goto unlock;
2070 } 2768 }
@@ -2073,6 +2771,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2073 u8 buf[64]; 2771 u8 buf[64];
2074 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2772 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2075 l2cap_build_conf_req(sk, buf), buf); 2773 l2cap_build_conf_req(sk, buf), buf);
2774 l2cap_pi(sk)->num_conf_req++;
2076 } 2775 }
2077 2776
2078unlock: 2777unlock:
@@ -2102,29 +2801,32 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2102 break; 2801 break;
2103 2802
2104 case L2CAP_CONF_UNACCEPT: 2803 case L2CAP_CONF_UNACCEPT:
2105 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) { 2804 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2106 char req[128]; 2805 int len = cmd->len - sizeof(*rsp);
2107 /* It does not make sense to adjust L2CAP parameters 2806 char req[64];
2108 * that are currently defined in the spec. We simply 2807
2109 * resend config request that we sent earlier. It is 2808 /* throw out any old stored conf requests */
2110 * stupid, but it helps qualification testing which 2809 result = L2CAP_CONF_SUCCESS;
2111 * expects at least some response from us. */ 2810 len = l2cap_parse_conf_rsp(sk, rsp->data,
2112 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2811 len, req, &result);
2113 l2cap_build_conf_req(sk, req), req); 2812 if (len < 0) {
2114 goto done; 2813 l2cap_send_disconn_req(conn, sk);
2814 goto done;
2815 }
2816
2817 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2818 L2CAP_CONF_REQ, len, req);
2819 l2cap_pi(sk)->num_conf_req++;
2820 if (result != L2CAP_CONF_SUCCESS)
2821 goto done;
2822 break;
2115 } 2823 }
2116 2824
2117 default: 2825 default:
2118 sk->sk_state = BT_DISCONN; 2826 sk->sk_state = BT_DISCONN;
2119 sk->sk_err = ECONNRESET; 2827 sk->sk_err = ECONNRESET;
2120 l2cap_sock_set_timer(sk, HZ * 5); 2828 l2cap_sock_set_timer(sk, HZ * 5);
2121 { 2829 l2cap_send_disconn_req(conn, sk);
2122 struct l2cap_disconn_req req;
2123 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2124 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2125 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2126 L2CAP_DISCONN_REQ, sizeof(req), &req);
2127 }
2128 goto done; 2830 goto done;
2129 } 2831 }
2130 2832
@@ -2134,7 +2836,16 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2134 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 2836 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2135 2837
2136 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2838 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2839 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2840 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2841 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2842
2137 sk->sk_state = BT_CONNECTED; 2843 sk->sk_state = BT_CONNECTED;
2844 l2cap_pi(sk)->expected_tx_seq = 0;
2845 l2cap_pi(sk)->buffer_seq = 0;
2846 l2cap_pi(sk)->num_to_ack = 0;
2847 __skb_queue_head_init(TX_QUEUE(sk));
2848 __skb_queue_head_init(SREJ_QUEUE(sk));
2138 l2cap_chan_ready(sk); 2849 l2cap_chan_ready(sk);
2139 } 2850 }
2140 2851
@@ -2165,6 +2876,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2165 2876
2166 sk->sk_shutdown = SHUTDOWN_MASK; 2877 sk->sk_shutdown = SHUTDOWN_MASK;
2167 2878
2879 skb_queue_purge(TX_QUEUE(sk));
2880 skb_queue_purge(SREJ_QUEUE(sk));
2881 del_timer(&l2cap_pi(sk)->retrans_timer);
2882 del_timer(&l2cap_pi(sk)->monitor_timer);
2883
2168 l2cap_chan_del(sk, ECONNRESET); 2884 l2cap_chan_del(sk, ECONNRESET);
2169 bh_unlock_sock(sk); 2885 bh_unlock_sock(sk);
2170 2886
@@ -2187,6 +2903,11 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2187 if (!sk) 2903 if (!sk)
2188 return 0; 2904 return 0;
2189 2905
2906 skb_queue_purge(TX_QUEUE(sk));
2907 skb_queue_purge(SREJ_QUEUE(sk));
2908 del_timer(&l2cap_pi(sk)->retrans_timer);
2909 del_timer(&l2cap_pi(sk)->monitor_timer);
2910
2190 l2cap_chan_del(sk, 0); 2911 l2cap_chan_del(sk, 0);
2191 bh_unlock_sock(sk); 2912 bh_unlock_sock(sk);
2192 2913
@@ -2205,10 +2926,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
2205 2926
2206 if (type == L2CAP_IT_FEAT_MASK) { 2927 if (type == L2CAP_IT_FEAT_MASK) {
2207 u8 buf[8]; 2928 u8 buf[8];
2929 u32 feat_mask = l2cap_feat_mask;
2208 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 2930 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2209 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 2931 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2210 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 2932 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2211 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); 2933 if (enable_ertm)
2934 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2935 | L2CAP_FEAT_FCS;
2936 put_unaligned_le32(feat_mask, rsp->data);
2212 l2cap_send_cmd(conn, cmd->ident, 2937 l2cap_send_cmd(conn, cmd->ident,
2213 L2CAP_INFO_RSP, sizeof(buf), buf); 2938 L2CAP_INFO_RSP, sizeof(buf), buf);
2214 } else if (type == L2CAP_IT_FIXED_CHAN) { 2939 } else if (type == L2CAP_IT_FIXED_CHAN) {
@@ -2359,9 +3084,374 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
2359 kfree_skb(skb); 3084 kfree_skb(skb);
2360} 3085}
2361 3086
3087static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3088{
3089 u16 our_fcs, rcv_fcs;
3090 int hdr_size = L2CAP_HDR_SIZE + 2;
3091
3092 if (pi->fcs == L2CAP_FCS_CRC16) {
3093 skb_trim(skb, skb->len - 2);
3094 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3095 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3096
3097 if (our_fcs != rcv_fcs)
3098 return -EINVAL;
3099 }
3100 return 0;
3101}
3102
3103static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3104{
3105 struct sk_buff *next_skb;
3106
3107 bt_cb(skb)->tx_seq = tx_seq;
3108 bt_cb(skb)->sar = sar;
3109
3110 next_skb = skb_peek(SREJ_QUEUE(sk));
3111 if (!next_skb) {
3112 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3113 return;
3114 }
3115
3116 do {
3117 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3118 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3119 return;
3120 }
3121
3122 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3123 break;
3124
3125 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3126
3127 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3128}
3129
3130static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3131{
3132 struct l2cap_pinfo *pi = l2cap_pi(sk);
3133 struct sk_buff *_skb;
3134 int err = -EINVAL;
3135
3136 switch (control & L2CAP_CTRL_SAR) {
3137 case L2CAP_SDU_UNSEGMENTED:
3138 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3139 kfree_skb(pi->sdu);
3140 break;
3141 }
3142
3143 err = sock_queue_rcv_skb(sk, skb);
3144 if (!err)
3145 return 0;
3146
3147 break;
3148
3149 case L2CAP_SDU_START:
3150 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3151 kfree_skb(pi->sdu);
3152 break;
3153 }
3154
3155 pi->sdu_len = get_unaligned_le16(skb->data);
3156 skb_pull(skb, 2);
3157
3158 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3159 if (!pi->sdu) {
3160 err = -ENOMEM;
3161 break;
3162 }
3163
3164 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3165
3166 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3167 pi->partial_sdu_len = skb->len;
3168 err = 0;
3169 break;
3170
3171 case L2CAP_SDU_CONTINUE:
3172 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3173 break;
3174
3175 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3176
3177 pi->partial_sdu_len += skb->len;
3178 if (pi->partial_sdu_len > pi->sdu_len)
3179 kfree_skb(pi->sdu);
3180 else
3181 err = 0;
3182
3183 break;
3184
3185 case L2CAP_SDU_END:
3186 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3187 break;
3188
3189 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3190
3191 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3192 pi->partial_sdu_len += skb->len;
3193
3194 if (pi->partial_sdu_len == pi->sdu_len) {
3195 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3196 err = sock_queue_rcv_skb(sk, _skb);
3197 if (err < 0)
3198 kfree_skb(_skb);
3199 }
3200 kfree_skb(pi->sdu);
3201 err = 0;
3202
3203 break;
3204 }
3205
3206 kfree_skb(skb);
3207 return err;
3208}
3209
3210static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3211{
3212 struct sk_buff *skb;
3213 u16 control = 0;
3214
3215 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3216 if (bt_cb(skb)->tx_seq != tx_seq)
3217 break;
3218
3219 skb = skb_dequeue(SREJ_QUEUE(sk));
3220 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3221 l2cap_sar_reassembly_sdu(sk, skb, control);
3222 l2cap_pi(sk)->buffer_seq_srej =
3223 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3224 tx_seq++;
3225 }
3226}
3227
3228static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3229{
3230 struct l2cap_pinfo *pi = l2cap_pi(sk);
3231 struct srej_list *l, *tmp;
3232 u16 control;
3233
3234 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3235 if (l->tx_seq == tx_seq) {
3236 list_del(&l->list);
3237 kfree(l);
3238 return;
3239 }
3240 control = L2CAP_SUPER_SELECT_REJECT;
3241 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3242 l2cap_send_sframe(pi, control);
3243 list_del(&l->list);
3244 list_add_tail(&l->list, SREJ_LIST(sk));
3245 }
3246}
3247
3248static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3249{
3250 struct l2cap_pinfo *pi = l2cap_pi(sk);
3251 struct srej_list *new;
3252 u16 control;
3253
3254 while (tx_seq != pi->expected_tx_seq) {
3255 control = L2CAP_SUPER_SELECT_REJECT;
3256 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3257 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3258 control |= L2CAP_CTRL_POLL;
3259 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3260 }
3261 l2cap_send_sframe(pi, control);
3262
3263 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3264 new->tx_seq = pi->expected_tx_seq++;
3265 list_add_tail(&new->list, SREJ_LIST(sk));
3266 }
3267 pi->expected_tx_seq++;
3268}
3269
3270static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3271{
3272 struct l2cap_pinfo *pi = l2cap_pi(sk);
3273 u8 tx_seq = __get_txseq(rx_control);
3274 u16 tx_control = 0;
3275 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3276 int err = 0;
3277
3278 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3279
3280 if (tx_seq == pi->expected_tx_seq)
3281 goto expected;
3282
3283 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3284 struct srej_list *first;
3285
3286 first = list_first_entry(SREJ_LIST(sk),
3287 struct srej_list, list);
3288 if (tx_seq == first->tx_seq) {
3289 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3290 l2cap_check_srej_gap(sk, tx_seq);
3291
3292 list_del(&first->list);
3293 kfree(first);
3294
3295 if (list_empty(SREJ_LIST(sk))) {
3296 pi->buffer_seq = pi->buffer_seq_srej;
3297 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3298 }
3299 } else {
3300 struct srej_list *l;
3301 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3302
3303 list_for_each_entry(l, SREJ_LIST(sk), list) {
3304 if (l->tx_seq == tx_seq) {
3305 l2cap_resend_srejframe(sk, tx_seq);
3306 return 0;
3307 }
3308 }
3309 l2cap_send_srejframe(sk, tx_seq);
3310 }
3311 } else {
3312 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3313
3314 INIT_LIST_HEAD(SREJ_LIST(sk));
3315 pi->buffer_seq_srej = pi->buffer_seq;
3316
3317 __skb_queue_head_init(SREJ_QUEUE(sk));
3318 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3319
3320 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3321
3322 l2cap_send_srejframe(sk, tx_seq);
3323 }
3324 return 0;
3325
3326expected:
3327 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3328
3329 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3330 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3331 return 0;
3332 }
3333
3334 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3335
3336 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3337 if (err < 0)
3338 return err;
3339
3340 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3341 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3342 tx_control |= L2CAP_SUPER_RCV_READY;
3343 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3344 l2cap_send_sframe(pi, tx_control);
3345 }
3346 return 0;
3347}
3348
3349static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3350{
3351 struct l2cap_pinfo *pi = l2cap_pi(sk);
3352 u8 tx_seq = __get_reqseq(rx_control);
3353
3354 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3355
3356 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3357 case L2CAP_SUPER_RCV_READY:
3358 if (rx_control & L2CAP_CTRL_POLL) {
3359 u16 control = L2CAP_CTRL_FINAL;
3360 control |= L2CAP_SUPER_RCV_READY |
3361 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3362 l2cap_send_sframe(l2cap_pi(sk), control);
3363 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3364
3365 } else if (rx_control & L2CAP_CTRL_FINAL) {
3366 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3367 pi->expected_ack_seq = tx_seq;
3368 l2cap_drop_acked_frames(sk);
3369
3370 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3371 break;
3372
3373 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3374 del_timer(&pi->monitor_timer);
3375
3376 if (pi->unacked_frames > 0)
3377 __mod_retrans_timer();
3378 } else {
3379 pi->expected_ack_seq = tx_seq;
3380 l2cap_drop_acked_frames(sk);
3381
3382 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3383 && (pi->unacked_frames > 0))
3384 __mod_retrans_timer();
3385
3386 l2cap_ertm_send(sk);
3387 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3388 }
3389 break;
3390
3391 case L2CAP_SUPER_REJECT:
3392 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3393
3394 pi->expected_ack_seq = __get_reqseq(rx_control);
3395 l2cap_drop_acked_frames(sk);
3396
3397 sk->sk_send_head = TX_QUEUE(sk)->next;
3398 pi->next_tx_seq = pi->expected_ack_seq;
3399
3400 l2cap_ertm_send(sk);
3401
3402 break;
3403
3404 case L2CAP_SUPER_SELECT_REJECT:
3405 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3406
3407 if (rx_control & L2CAP_CTRL_POLL) {
3408 l2cap_retransmit_frame(sk, tx_seq);
3409 pi->expected_ack_seq = tx_seq;
3410 l2cap_drop_acked_frames(sk);
3411 l2cap_ertm_send(sk);
3412 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3413 pi->srej_save_reqseq = tx_seq;
3414 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3415 }
3416 } else if (rx_control & L2CAP_CTRL_FINAL) {
3417 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3418 pi->srej_save_reqseq == tx_seq)
3419 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3420 else
3421 l2cap_retransmit_frame(sk, tx_seq);
3422 }
3423 else {
3424 l2cap_retransmit_frame(sk, tx_seq);
3425 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3426 pi->srej_save_reqseq = tx_seq;
3427 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3428 }
3429 }
3430 break;
3431
3432 case L2CAP_SUPER_RCV_NOT_READY:
3433 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3434 pi->expected_ack_seq = tx_seq;
3435 l2cap_drop_acked_frames(sk);
3436
3437 del_timer(&l2cap_pi(sk)->retrans_timer);
3438 if (rx_control & L2CAP_CTRL_POLL) {
3439 u16 control = L2CAP_CTRL_FINAL;
3440 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3441 }
3442 break;
3443 }
3444
3445 return 0;
3446}
3447
2362static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 3448static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2363{ 3449{
2364 struct sock *sk; 3450 struct sock *sk;
3451 struct l2cap_pinfo *pi;
3452 u16 control, len;
3453 u8 tx_seq;
3454 int err;
2365 3455
2366 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 3456 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2367 if (!sk) { 3457 if (!sk) {
@@ -2369,22 +3459,91 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
2369 goto drop; 3459 goto drop;
2370 } 3460 }
2371 3461
3462 pi = l2cap_pi(sk);
3463
2372 BT_DBG("sk %p, len %d", sk, skb->len); 3464 BT_DBG("sk %p, len %d", sk, skb->len);
2373 3465
2374 if (sk->sk_state != BT_CONNECTED) 3466 if (sk->sk_state != BT_CONNECTED)
2375 goto drop; 3467 goto drop;
2376 3468
2377 if (l2cap_pi(sk)->imtu < skb->len) 3469 switch (pi->mode) {
2378 goto drop; 3470 case L2CAP_MODE_BASIC:
3471 /* If socket recv buffers overflows we drop data here
3472 * which is *bad* because L2CAP has to be reliable.
3473 * But we don't have any other choice. L2CAP doesn't
3474 * provide flow control mechanism. */
2379 3475
2380 /* If socket recv buffers overflows we drop data here 3476 if (pi->imtu < skb->len)
2381 * which is *bad* because L2CAP has to be reliable. 3477 goto drop;
2382 * But we don't have any other choice. L2CAP doesn't 3478
2383 * provide flow control mechanism. */ 3479 if (!sock_queue_rcv_skb(sk, skb))
3480 goto done;
3481 break;
3482
3483 case L2CAP_MODE_ERTM:
3484 control = get_unaligned_le16(skb->data);
3485 skb_pull(skb, 2);
3486 len = skb->len;
3487
3488 if (__is_sar_start(control))
3489 len -= 2;
3490
3491 if (pi->fcs == L2CAP_FCS_CRC16)
3492 len -= 2;
3493
3494 /*
3495 * We can just drop the corrupted I-frame here.
3496 * Receiver will miss it and start proper recovery
3497 * procedures and ask retransmission.
3498 */
3499 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3500 goto drop;
3501
3502 if (l2cap_check_fcs(pi, skb))
3503 goto drop;
3504
3505 if (__is_iframe(control))
3506 err = l2cap_data_channel_iframe(sk, control, skb);
3507 else
3508 err = l2cap_data_channel_sframe(sk, control, skb);
3509
3510 if (!err)
3511 goto done;
3512 break;
3513
3514 case L2CAP_MODE_STREAMING:
3515 control = get_unaligned_le16(skb->data);
3516 skb_pull(skb, 2);
3517 len = skb->len;
3518
3519 if (__is_sar_start(control))
3520 len -= 2;
3521
3522 if (pi->fcs == L2CAP_FCS_CRC16)
3523 len -= 2;
3524
3525 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3526 goto drop;
3527
3528 if (l2cap_check_fcs(pi, skb))
3529 goto drop;
3530
3531 tx_seq = __get_txseq(control);
3532
3533 if (pi->expected_tx_seq == tx_seq)
3534 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3535 else
3536 pi->expected_tx_seq = tx_seq + 1;
3537
3538 err = l2cap_sar_reassembly_sdu(sk, skb, control);
2384 3539
2385 if (!sock_queue_rcv_skb(sk, skb))
2386 goto done; 3540 goto done;
2387 3541
3542 default:
3543 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3544 break;
3545 }
3546
2388drop: 3547drop:
2389 kfree_skb(skb); 3548 kfree_skb(skb);
2390 3549
@@ -2433,6 +3592,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2433 cid = __le16_to_cpu(lh->cid); 3592 cid = __le16_to_cpu(lh->cid);
2434 len = __le16_to_cpu(lh->len); 3593 len = __le16_to_cpu(lh->len);
2435 3594
3595 if (len != skb->len) {
3596 kfree_skb(skb);
3597 return;
3598 }
3599
2436 BT_DBG("len %d, cid 0x%4.4x", len, cid); 3600 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2437 3601
2438 switch (cid) { 3602 switch (cid) {
@@ -2441,7 +3605,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2441 break; 3605 break;
2442 3606
2443 case L2CAP_CID_CONN_LESS: 3607 case L2CAP_CID_CONN_LESS:
2444 psm = get_unaligned((__le16 *) skb->data); 3608 psm = get_unaligned_le16(skb->data);
2445 skb_pull(skb, 2); 3609 skb_pull(skb, 2);
2446 l2cap_conless_channel(conn, psm, skb); 3610 l2cap_conless_channel(conn, psm, skb);
2447 break; 3611 break;
@@ -2828,6 +3992,9 @@ EXPORT_SYMBOL(l2cap_load);
2828module_init(l2cap_init); 3992module_init(l2cap_init);
2829module_exit(l2cap_exit); 3993module_exit(l2cap_exit);
2830 3994
3995module_param(enable_ertm, bool, 0644);
3996MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3997
2831MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 3998MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2832MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); 3999MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2833MODULE_VERSION(VERSION); 4000MODULE_VERSION(VERSION);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index e50566ebf9f9..25692bc0a342 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -244,6 +244,33 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
244 auth_type); 244 auth_type);
245} 245}
246 246
247static void rfcomm_session_timeout(unsigned long arg)
248{
249 struct rfcomm_session *s = (void *) arg;
250
251 BT_DBG("session %p state %ld", s, s->state);
252
253 set_bit(RFCOMM_TIMED_OUT, &s->flags);
254 rfcomm_session_put(s);
255 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
256}
257
258static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
259{
260 BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
261
262 if (!mod_timer(&s->timer, jiffies + timeout))
263 rfcomm_session_hold(s);
264}
265
266static void rfcomm_session_clear_timer(struct rfcomm_session *s)
267{
268 BT_DBG("session %p state %ld", s, s->state);
269
270 if (timer_pending(&s->timer) && del_timer(&s->timer))
271 rfcomm_session_put(s);
272}
273
247/* ---- RFCOMM DLCs ---- */ 274/* ---- RFCOMM DLCs ---- */
248static void rfcomm_dlc_timeout(unsigned long arg) 275static void rfcomm_dlc_timeout(unsigned long arg)
249{ 276{
@@ -320,6 +347,7 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d)
320 347
321 rfcomm_session_hold(s); 348 rfcomm_session_hold(s);
322 349
350 rfcomm_session_clear_timer(s);
323 rfcomm_dlc_hold(d); 351 rfcomm_dlc_hold(d);
324 list_add(&d->list, &s->dlcs); 352 list_add(&d->list, &s->dlcs);
325 d->session = s; 353 d->session = s;
@@ -335,6 +363,9 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
335 d->session = NULL; 363 d->session = NULL;
336 rfcomm_dlc_put(d); 364 rfcomm_dlc_put(d);
337 365
366 if (list_empty(&s->dlcs))
367 rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
368
338 rfcomm_session_put(s); 369 rfcomm_session_put(s);
339} 370}
340 371
@@ -567,6 +598,8 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
567 598
568 BT_DBG("session %p sock %p", s, sock); 599 BT_DBG("session %p sock %p", s, sock);
569 600
601 setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
602
570 INIT_LIST_HEAD(&s->dlcs); 603 INIT_LIST_HEAD(&s->dlcs);
571 s->state = state; 604 s->state = state;
572 s->sock = sock; 605 s->sock = sock;
@@ -598,6 +631,7 @@ static void rfcomm_session_del(struct rfcomm_session *s)
598 if (state == BT_CONNECTED) 631 if (state == BT_CONNECTED)
599 rfcomm_send_disc(s, 0); 632 rfcomm_send_disc(s, 0);
600 633
634 rfcomm_session_clear_timer(s);
601 sock_release(s->sock); 635 sock_release(s->sock);
602 kfree(s); 636 kfree(s);
603 637
@@ -639,6 +673,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
639 __rfcomm_dlc_close(d, err); 673 __rfcomm_dlc_close(d, err);
640 } 674 }
641 675
676 rfcomm_session_clear_timer(s);
642 rfcomm_session_put(s); 677 rfcomm_session_put(s);
643} 678}
644 679
@@ -1879,6 +1914,12 @@ static inline void rfcomm_process_sessions(void)
1879 struct rfcomm_session *s; 1914 struct rfcomm_session *s;
1880 s = list_entry(p, struct rfcomm_session, list); 1915 s = list_entry(p, struct rfcomm_session, list);
1881 1916
1917 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1918 s->state = BT_DISCONN;
1919 rfcomm_send_disc(s, 0);
1920 continue;
1921 }
1922
1882 if (s->state == BT_LISTEN) { 1923 if (s->state == BT_LISTEN) {
1883 rfcomm_accept_connection(s); 1924 rfcomm_accept_connection(s);
1884 continue; 1925 continue;
@@ -2080,28 +2121,43 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL);
2080/* ---- Initialization ---- */ 2121/* ---- Initialization ---- */
2081static int __init rfcomm_init(void) 2122static int __init rfcomm_init(void)
2082{ 2123{
2124 int err;
2125
2083 l2cap_load(); 2126 l2cap_load();
2084 2127
2085 hci_register_cb(&rfcomm_cb); 2128 hci_register_cb(&rfcomm_cb);
2086 2129
2087 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2130 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
2088 if (IS_ERR(rfcomm_thread)) { 2131 if (IS_ERR(rfcomm_thread)) {
2089 hci_unregister_cb(&rfcomm_cb); 2132 err = PTR_ERR(rfcomm_thread);
2090 return PTR_ERR(rfcomm_thread); 2133 goto unregister;
2091 } 2134 }
2092 2135
2093 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2136 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
2094 BT_ERR("Failed to create RFCOMM info file"); 2137 BT_ERR("Failed to create RFCOMM info file");
2095 2138
2096 rfcomm_init_sockets(); 2139 err = rfcomm_init_ttys();
2140 if (err < 0)
2141 goto stop;
2097 2142
2098#ifdef CONFIG_BT_RFCOMM_TTY 2143 err = rfcomm_init_sockets();
2099 rfcomm_init_ttys(); 2144 if (err < 0)
2100#endif 2145 goto cleanup;
2101 2146
2102 BT_INFO("RFCOMM ver %s", VERSION); 2147 BT_INFO("RFCOMM ver %s", VERSION);
2103 2148
2104 return 0; 2149 return 0;
2150
2151cleanup:
2152 rfcomm_cleanup_ttys();
2153
2154stop:
2155 kthread_stop(rfcomm_thread);
2156
2157unregister:
2158 hci_unregister_cb(&rfcomm_cb);
2159
2160 return err;
2105} 2161}
2106 2162
2107static void __exit rfcomm_exit(void) 2163static void __exit rfcomm_exit(void)
@@ -2112,9 +2168,7 @@ static void __exit rfcomm_exit(void)
2112 2168
2113 kthread_stop(rfcomm_thread); 2169 kthread_stop(rfcomm_thread);
2114 2170
2115#ifdef CONFIG_BT_RFCOMM_TTY
2116 rfcomm_cleanup_ttys(); 2171 rfcomm_cleanup_ttys();
2117#endif
2118 2172
2119 rfcomm_cleanup_sockets(); 2173 rfcomm_cleanup_sockets();
2120} 2174}
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7f482784e9f7..0b85e8116859 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1132,7 +1132,7 @@ error:
1132 return err; 1132 return err;
1133} 1133}
1134 1134
1135void __exit rfcomm_cleanup_sockets(void) 1135void rfcomm_cleanup_sockets(void)
1136{ 1136{
1137 class_remove_file(bt_class, &class_attr_rfcomm); 1137 class_remove_file(bt_class, &class_attr_rfcomm);
1138 1138
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 51ae0c3e470a..13c27f17192c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,20 +359,9 @@ static void sco_sock_kill(struct sock *sk)
359 sock_put(sk); 359 sock_put(sk);
360} 360}
361 361
362/* Close socket. 362static void __sco_sock_close(struct sock *sk)
363 * Must be called on unlocked socket.
364 */
365static void sco_sock_close(struct sock *sk)
366{ 363{
367 struct sco_conn *conn; 364 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
368
369 sco_sock_clear_timer(sk);
370
371 lock_sock(sk);
372
373 conn = sco_pi(sk)->conn;
374
375 BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket);
376 365
377 switch (sk->sk_state) { 366 switch (sk->sk_state) {
378 case BT_LISTEN: 367 case BT_LISTEN:
@@ -390,9 +379,15 @@ static void sco_sock_close(struct sock *sk)
390 sock_set_flag(sk, SOCK_ZAPPED); 379 sock_set_flag(sk, SOCK_ZAPPED);
391 break; 380 break;
392 } 381 }
382}
393 383
384/* Must be called on unlocked socket. */
385static void sco_sock_close(struct sock *sk)
386{
387 sco_sock_clear_timer(sk);
388 lock_sock(sk);
389 __sco_sock_close(sk);
394 release_sock(sk); 390 release_sock(sk);
395
396 sco_sock_kill(sk); 391 sco_sock_kill(sk);
397} 392}
398 393
@@ -748,6 +743,30 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
748 return err; 743 return err;
749} 744}
750 745
746static int sco_sock_shutdown(struct socket *sock, int how)
747{
748 struct sock *sk = sock->sk;
749 int err = 0;
750
751 BT_DBG("sock %p, sk %p", sock, sk);
752
753 if (!sk)
754 return 0;
755
756 lock_sock(sk);
757 if (!sk->sk_shutdown) {
758 sk->sk_shutdown = SHUTDOWN_MASK;
759 sco_sock_clear_timer(sk);
760 __sco_sock_close(sk);
761
762 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
763 err = bt_sock_wait_state(sk, BT_CLOSED,
764 sk->sk_lingertime);
765 }
766 release_sock(sk);
767 return err;
768}
769
751static int sco_sock_release(struct socket *sock) 770static int sco_sock_release(struct socket *sock)
752{ 771{
753 struct sock *sk = sock->sk; 772 struct sock *sk = sock->sk;
@@ -969,7 +988,7 @@ static const struct proto_ops sco_sock_ops = {
969 .ioctl = bt_sock_ioctl, 988 .ioctl = bt_sock_ioctl,
970 .mmap = sock_no_mmap, 989 .mmap = sock_no_mmap,
971 .socketpair = sock_no_socketpair, 990 .socketpair = sock_no_socketpair,
972 .shutdown = sock_no_shutdown, 991 .shutdown = sco_sock_shutdown,
973 .setsockopt = sco_sock_setsockopt, 992 .setsockopt = sco_sock_setsockopt,
974 .getsockopt = sco_sock_getsockopt 993 .getsockopt = sco_sock_getsockopt
975}; 994};
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 15d43ba86b53..07a07770c8b6 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -20,7 +20,7 @@
20#include "br_private.h" 20#include "br_private.h"
21 21
22/* net device transmit always called with no BH (preempt_disabled) */ 22/* net device transmit always called with no BH (preempt_disabled) */
23int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 23netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
24{ 24{
25 struct net_bridge *br = netdev_priv(dev); 25 struct net_bridge *br = netdev_priv(dev);
26 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index d2c27c808d3b..bc1704ac6cd9 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -22,7 +22,8 @@
22static inline int should_deliver(const struct net_bridge_port *p, 22static inline int should_deliver(const struct net_bridge_port *p,
23 const struct sk_buff *skb) 23 const struct sk_buff *skb)
24{ 24{
25 return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING); 25 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
26 p->state == BR_STATE_FORWARDING);
26} 27}
27 28
28static inline unsigned packet_length(const struct sk_buff *skb) 29static inline unsigned packet_length(const struct sk_buff *skb)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index eb404dc3ed6e..e486f1fc3632 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -256,6 +256,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
256 p->path_cost = port_cost(dev); 256 p->path_cost = port_cost(dev);
257 p->priority = 0x8000 >> BR_PORT_BITS; 257 p->priority = 0x8000 >> BR_PORT_BITS;
258 p->port_no = index; 258 p->port_no = index;
259 p->flags = 0;
259 br_init_port(p); 260 br_init_port(p);
260 p->state = BR_STATE_DISABLED; 261 p->state = BR_STATE_DISABLED;
261 br_stp_port_timer_init(p); 262 br_stp_port_timer_init(p);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d5b5537272b4..2114e45682ea 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -81,6 +81,9 @@ struct net_bridge_port
81 struct timer_list message_age_timer; 81 struct timer_list message_age_timer;
82 struct kobject kobj; 82 struct kobject kobj;
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84
85 unsigned long flags;
86#define BR_HAIRPIN_MODE 0x00000001
84}; 87};
85 88
86struct net_bridge 89struct net_bridge
@@ -140,7 +143,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
140 143
141/* br_device.c */ 144/* br_device.c */
142extern void br_dev_setup(struct net_device *dev); 145extern void br_dev_setup(struct net_device *dev);
143extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev); 146extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
147 struct net_device *dev);
144 148
145/* br_fdb.c */ 149/* br_fdb.c */
146extern int br_fdb_init(void); 150extern int br_fdb_init(void);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 4a3cdf8f3813..820643a3ba9c 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -143,6 +143,22 @@ static ssize_t store_flush(struct net_bridge_port *p, unsigned long v)
143} 143}
144static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); 144static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
145 145
146static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
147{
148 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
149 return sprintf(buf, "%d\n", hairpin_mode);
150}
151static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
152{
153 if (v)
154 p->flags |= BR_HAIRPIN_MODE;
155 else
156 p->flags &= ~BR_HAIRPIN_MODE;
157 return 0;
158}
159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
160 show_hairpin_mode, store_hairpin_mode);
161
146static struct brport_attribute *brport_attrs[] = { 162static struct brport_attribute *brport_attrs[] = {
147 &brport_attr_path_cost, 163 &brport_attr_path_cost,
148 &brport_attr_priority, 164 &brport_attr_priority,
@@ -159,6 +175,7 @@ static struct brport_attribute *brport_attrs[] = {
159 &brport_attr_forward_delay_timer, 175 &brport_attr_forward_delay_timer,
160 &brport_attr_hold_timer, 176 &brport_attr_hold_timer,
161 &brport_attr_flush, 177 &brport_attr_flush,
178 &brport_attr_hairpin_mode,
162 NULL 179 NULL
163}; 180};
164 181
diff --git a/net/can/af_can.c b/net/can/af_can.c
index e733725b11d4..ef1c43a2ed56 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -651,12 +651,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
651 struct can_frame *cf = (struct can_frame *)skb->data; 651 struct can_frame *cf = (struct can_frame *)skb->data;
652 int matches; 652 int matches;
653 653
654 if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) { 654 if (!net_eq(dev_net(dev), &init_net))
655 kfree_skb(skb); 655 goto drop;
656 return 0;
657 }
658 656
659 BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8); 657 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
658 skb->len != sizeof(struct can_frame) ||
659 cf->can_dlc > 8,
660 "PF_CAN: dropped non conform skbuf: "
661 "dev type %d, len %d, can_dlc %d\n",
662 dev->type, skb->len, cf->can_dlc))
663 goto drop;
660 664
661 /* update statistics */ 665 /* update statistics */
662 can_stats.rx_frames++; 666 can_stats.rx_frames++;
@@ -682,7 +686,11 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
682 can_stats.matches_delta++; 686 can_stats.matches_delta++;
683 } 687 }
684 688
685 return 0; 689 return NET_RX_SUCCESS;
690
691drop:
692 kfree_skb(skb);
693 return NET_RX_DROP;
686} 694}
687 695
688/* 696/*
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 72720c710351..597da4f8f888 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -46,6 +46,7 @@
46#include <linux/hrtimer.h> 46#include <linux/hrtimer.h>
47#include <linux/list.h> 47#include <linux/list.h>
48#include <linux/proc_fs.h> 48#include <linux/proc_fs.h>
49#include <linux/seq_file.h>
49#include <linux/uio.h> 50#include <linux/uio.h>
50#include <linux/net.h> 51#include <linux/net.h>
51#include <linux/netdevice.h> 52#include <linux/netdevice.h>
@@ -146,23 +147,18 @@ static char *bcm_proc_getifname(int ifindex)
146 return "???"; 147 return "???";
147} 148}
148 149
149static int bcm_read_proc(char *page, char **start, off_t off, 150static int bcm_proc_show(struct seq_file *m, void *v)
150 int count, int *eof, void *data)
151{ 151{
152 int len = 0; 152 struct sock *sk = (struct sock *)m->private;
153 struct sock *sk = (struct sock *)data;
154 struct bcm_sock *bo = bcm_sk(sk); 153 struct bcm_sock *bo = bcm_sk(sk);
155 struct bcm_op *op; 154 struct bcm_op *op;
156 155
157 len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p", 156 seq_printf(m, ">>> socket %p", sk->sk_socket);
158 sk->sk_socket); 157 seq_printf(m, " / sk %p", sk);
159 len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk); 158 seq_printf(m, " / bo %p", bo);
160 len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo); 159 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
161 len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu", 160 seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex));
162 bo->dropped_usr_msgs); 161 seq_printf(m, " <<<\n");
163 len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
164 bcm_proc_getifname(bo->ifindex));
165 len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");
166 162
167 list_for_each_entry(op, &bo->rx_ops, list) { 163 list_for_each_entry(op, &bo->rx_ops, list) {
168 164
@@ -172,71 +168,62 @@ static int bcm_read_proc(char *page, char **start, off_t off,
172 if (!op->frames_abs) 168 if (!op->frames_abs)
173 continue; 169 continue;
174 170
175 len += snprintf(page + len, PAGE_SIZE - len, 171 seq_printf(m, "rx_op: %03X %-5s ",
176 "rx_op: %03X %-5s ",
177 op->can_id, bcm_proc_getifname(op->ifindex)); 172 op->can_id, bcm_proc_getifname(op->ifindex));
178 len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ", 173 seq_printf(m, "[%d]%c ", op->nframes,
179 op->nframes,
180 (op->flags & RX_CHECK_DLC)?'d':' '); 174 (op->flags & RX_CHECK_DLC)?'d':' ');
181 if (op->kt_ival1.tv64) 175 if (op->kt_ival1.tv64)
182 len += snprintf(page + len, PAGE_SIZE - len, 176 seq_printf(m, "timeo=%lld ",
183 "timeo=%lld ",
184 (long long) 177 (long long)
185 ktime_to_us(op->kt_ival1)); 178 ktime_to_us(op->kt_ival1));
186 179
187 if (op->kt_ival2.tv64) 180 if (op->kt_ival2.tv64)
188 len += snprintf(page + len, PAGE_SIZE - len, 181 seq_printf(m, "thr=%lld ",
189 "thr=%lld ",
190 (long long) 182 (long long)
191 ktime_to_us(op->kt_ival2)); 183 ktime_to_us(op->kt_ival2));
192 184
193 len += snprintf(page + len, PAGE_SIZE - len, 185 seq_printf(m, "# recv %ld (%ld) => reduction: ",
194 "# recv %ld (%ld) => reduction: ",
195 op->frames_filtered, op->frames_abs); 186 op->frames_filtered, op->frames_abs);
196 187
197 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; 188 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
198 189
199 len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n", 190 seq_printf(m, "%s%ld%%\n",
200 (reduction == 100)?"near ":"", reduction); 191 (reduction == 100)?"near ":"", reduction);
201
202 if (len > PAGE_SIZE - 200) {
203 /* mark output cut off */
204 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
205 break;
206 }
207 } 192 }
208 193
209 list_for_each_entry(op, &bo->tx_ops, list) { 194 list_for_each_entry(op, &bo->tx_ops, list) {
210 195
211 len += snprintf(page + len, PAGE_SIZE - len, 196 seq_printf(m, "tx_op: %03X %s [%d] ",
212 "tx_op: %03X %s [%d] ",
213 op->can_id, bcm_proc_getifname(op->ifindex), 197 op->can_id, bcm_proc_getifname(op->ifindex),
214 op->nframes); 198 op->nframes);
215 199
216 if (op->kt_ival1.tv64) 200 if (op->kt_ival1.tv64)
217 len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ", 201 seq_printf(m, "t1=%lld ",
218 (long long) ktime_to_us(op->kt_ival1)); 202 (long long) ktime_to_us(op->kt_ival1));
219 203
220 if (op->kt_ival2.tv64) 204 if (op->kt_ival2.tv64)
221 len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ", 205 seq_printf(m, "t2=%lld ",
222 (long long) ktime_to_us(op->kt_ival2)); 206 (long long) ktime_to_us(op->kt_ival2));
223 207
224 len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n", 208 seq_printf(m, "# sent %ld\n", op->frames_abs);
225 op->frames_abs);
226
227 if (len > PAGE_SIZE - 100) {
228 /* mark output cut off */
229 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
230 break;
231 }
232 } 209 }
210 seq_putc(m, '\n');
211 return 0;
212}
233 213
234 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 214static int bcm_proc_open(struct inode *inode, struct file *file)
235 215{
236 *eof = 1; 216 return single_open(file, bcm_proc_show, PDE(inode)->data);
237 return len;
238} 217}
239 218
219static const struct file_operations bcm_proc_fops = {
220 .owner = THIS_MODULE,
221 .open = bcm_proc_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
240/* 227/*
241 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface 228 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
242 * of the given bcm tx op 229 * of the given bcm tx op
@@ -1515,9 +1502,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1515 if (proc_dir) { 1502 if (proc_dir) {
1516 /* unique socket address as filename */ 1503 /* unique socket address as filename */
1517 sprintf(bo->procname, "%p", sock); 1504 sprintf(bo->procname, "%p", sock);
1518 bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644, 1505 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1519 proc_dir, 1506 proc_dir,
1520 bcm_read_proc, sk); 1507 &bcm_proc_fops, sk);
1521 } 1508 }
1522 1509
1523 return 0; 1510 return 0;
diff --git a/net/can/proc.c b/net/can/proc.c
index 1463653dbe34..9b9ad29be567 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -196,8 +196,8 @@ void can_stat_update(unsigned long data)
196 * 196 *
197 */ 197 */
198 198
199static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list, 199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
200 struct net_device *dev) 200 struct net_device *dev)
201{ 201{
202 struct receiver *r; 202 struct receiver *r;
203 struct hlist_node *n; 203 struct hlist_node *n;
@@ -208,199 +208,188 @@ static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list,
208 " %-5s %08X %08x %08x %08x %8ld %s\n" : 208 " %-5s %08X %08x %08x %08x %8ld %s\n" :
209 " %-5s %03X %08x %08lx %08lx %8ld %s\n"; 209 " %-5s %03X %08x %08lx %08lx %8ld %s\n";
210 210
211 len += snprintf(page + len, PAGE_SIZE - len, fmt, 211 seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
212 DNAME(dev), r->can_id, r->mask,
213 (unsigned long)r->func, (unsigned long)r->data, 212 (unsigned long)r->func, (unsigned long)r->data,
214 r->matches, r->ident); 213 r->matches, r->ident);
215
216 /* does a typical line fit into the current buffer? */
217
218 /* 100 Bytes before end of buffer */
219 if (len > PAGE_SIZE - 100) {
220 /* mark output cut off */
221 len += snprintf(page + len, PAGE_SIZE - len,
222 " (..)\n");
223 break;
224 }
225 } 214 }
226 rcu_read_unlock(); 215 rcu_read_unlock();
227
228 return len;
229} 216}
230 217
231static int can_print_recv_banner(char *page, int len) 218static void can_print_recv_banner(struct seq_file *m)
232{ 219{
233 /* 220 /*
234 * can1. 00000000 00000000 00000000 221 * can1. 00000000 00000000 00000000
235 * ....... 0 tp20 222 * ....... 0 tp20
236 */ 223 */
237 len += snprintf(page + len, PAGE_SIZE - len, 224 seq_puts(m, " device can_id can_mask function"
238 " device can_id can_mask function"
239 " userdata matches ident\n"); 225 " userdata matches ident\n");
240
241 return len;
242} 226}
243 227
244static int can_proc_read_stats(char *page, char **start, off_t off, 228static int can_stats_proc_show(struct seq_file *m, void *v)
245 int count, int *eof, void *data)
246{ 229{
247 int len = 0; 230 seq_putc(m, '\n');
231 seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames);
232 seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames);
233 seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches);
248 234
249 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 235 seq_putc(m, '\n');
250 len += snprintf(page + len, PAGE_SIZE - len,
251 " %8ld transmitted frames (TXF)\n",
252 can_stats.tx_frames);
253 len += snprintf(page + len, PAGE_SIZE - len,
254 " %8ld received frames (RXF)\n", can_stats.rx_frames);
255 len += snprintf(page + len, PAGE_SIZE - len,
256 " %8ld matched frames (RXMF)\n", can_stats.matches);
257
258 len += snprintf(page + len, PAGE_SIZE - len, "\n");
259 236
260 if (can_stattimer.function == can_stat_update) { 237 if (can_stattimer.function == can_stat_update) {
261 len += snprintf(page + len, PAGE_SIZE - len, 238 seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
262 " %8ld %% total match ratio (RXMR)\n",
263 can_stats.total_rx_match_ratio); 239 can_stats.total_rx_match_ratio);
264 240
265 len += snprintf(page + len, PAGE_SIZE - len, 241 seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
266 " %8ld frames/s total tx rate (TXR)\n",
267 can_stats.total_tx_rate); 242 can_stats.total_tx_rate);
268 len += snprintf(page + len, PAGE_SIZE - len, 243 seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
269 " %8ld frames/s total rx rate (RXR)\n",
270 can_stats.total_rx_rate); 244 can_stats.total_rx_rate);
271 245
272 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 246 seq_putc(m, '\n');
273 247
274 len += snprintf(page + len, PAGE_SIZE - len, 248 seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
275 " %8ld %% current match ratio (CRXMR)\n",
276 can_stats.current_rx_match_ratio); 249 can_stats.current_rx_match_ratio);
277 250
278 len += snprintf(page + len, PAGE_SIZE - len, 251 seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
279 " %8ld frames/s current tx rate (CTXR)\n",
280 can_stats.current_tx_rate); 252 can_stats.current_tx_rate);
281 len += snprintf(page + len, PAGE_SIZE - len, 253 seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
282 " %8ld frames/s current rx rate (CRXR)\n",
283 can_stats.current_rx_rate); 254 can_stats.current_rx_rate);
284 255
285 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 256 seq_putc(m, '\n');
286 257
287 len += snprintf(page + len, PAGE_SIZE - len, 258 seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
288 " %8ld %% max match ratio (MRXMR)\n",
289 can_stats.max_rx_match_ratio); 259 can_stats.max_rx_match_ratio);
290 260
291 len += snprintf(page + len, PAGE_SIZE - len, 261 seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
292 " %8ld frames/s max tx rate (MTXR)\n",
293 can_stats.max_tx_rate); 262 can_stats.max_tx_rate);
294 len += snprintf(page + len, PAGE_SIZE - len, 263 seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
295 " %8ld frames/s max rx rate (MRXR)\n",
296 can_stats.max_rx_rate); 264 can_stats.max_rx_rate);
297 265
298 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 266 seq_putc(m, '\n');
299 } 267 }
300 268
301 len += snprintf(page + len, PAGE_SIZE - len, 269 seq_printf(m, " %8ld current receive list entries (CRCV)\n",
302 " %8ld current receive list entries (CRCV)\n",
303 can_pstats.rcv_entries); 270 can_pstats.rcv_entries);
304 len += snprintf(page + len, PAGE_SIZE - len, 271 seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
305 " %8ld maximum receive list entries (MRCV)\n",
306 can_pstats.rcv_entries_max); 272 can_pstats.rcv_entries_max);
307 273
308 if (can_pstats.stats_reset) 274 if (can_pstats.stats_reset)
309 len += snprintf(page + len, PAGE_SIZE - len, 275 seq_printf(m, "\n %8ld statistic resets (STR)\n",
310 "\n %8ld statistic resets (STR)\n",
311 can_pstats.stats_reset); 276 can_pstats.stats_reset);
312 277
313 if (can_pstats.user_reset) 278 if (can_pstats.user_reset)
314 len += snprintf(page + len, PAGE_SIZE - len, 279 seq_printf(m, " %8ld user statistic resets (USTR)\n",
315 " %8ld user statistic resets (USTR)\n",
316 can_pstats.user_reset); 280 can_pstats.user_reset);
317 281
318 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 282 seq_putc(m, '\n');
319 283 return 0;
320 *eof = 1;
321 return len;
322} 284}
323 285
324static int can_proc_read_reset_stats(char *page, char **start, off_t off, 286static int can_stats_proc_open(struct inode *inode, struct file *file)
325 int count, int *eof, void *data)
326{ 287{
327 int len = 0; 288 return single_open(file, can_stats_proc_show, NULL);
289}
290
291static const struct file_operations can_stats_proc_fops = {
292 .owner = THIS_MODULE,
293 .open = can_stats_proc_open,
294 .read = seq_read,
295 .llseek = seq_lseek,
296 .release = single_release,
297};
328 298
299static int can_reset_stats_proc_show(struct seq_file *m, void *v)
300{
329 user_reset = 1; 301 user_reset = 1;
330 302
331 if (can_stattimer.function == can_stat_update) { 303 if (can_stattimer.function == can_stat_update) {
332 len += snprintf(page + len, PAGE_SIZE - len, 304 seq_printf(m, "Scheduled statistic reset #%ld.\n",
333 "Scheduled statistic reset #%ld.\n",
334 can_pstats.stats_reset + 1); 305 can_pstats.stats_reset + 1);
335 306
336 } else { 307 } else {
337 if (can_stats.jiffies_init != jiffies) 308 if (can_stats.jiffies_init != jiffies)
338 can_init_stats(); 309 can_init_stats();
339 310
340 len += snprintf(page + len, PAGE_SIZE - len, 311 seq_printf(m, "Performed statistic reset #%ld.\n",
341 "Performed statistic reset #%ld.\n",
342 can_pstats.stats_reset); 312 can_pstats.stats_reset);
343 } 313 }
314 return 0;
315}
344 316
345 *eof = 1; 317static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
346 return len; 318{
319 return single_open(file, can_reset_stats_proc_show, NULL);
347} 320}
348 321
349static int can_proc_read_version(char *page, char **start, off_t off, 322static const struct file_operations can_reset_stats_proc_fops = {
350 int count, int *eof, void *data) 323 .owner = THIS_MODULE,
324 .open = can_reset_stats_proc_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
328};
329
330static int can_version_proc_show(struct seq_file *m, void *v)
351{ 331{
352 int len = 0; 332 seq_printf(m, "%s\n", CAN_VERSION_STRING);
333 return 0;
334}
353 335
354 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 336static int can_version_proc_open(struct inode *inode, struct file *file)
355 CAN_VERSION_STRING); 337{
356 *eof = 1; 338 return single_open(file, can_version_proc_show, NULL);
357 return len;
358} 339}
359 340
360static int can_proc_read_rcvlist(char *page, char **start, off_t off, 341static const struct file_operations can_version_proc_fops = {
361 int count, int *eof, void *data) 342 .owner = THIS_MODULE,
343 .open = can_version_proc_open,
344 .read = seq_read,
345 .llseek = seq_lseek,
346 .release = single_release,
347};
348
349static int can_rcvlist_proc_show(struct seq_file *m, void *v)
362{ 350{
363 /* double cast to prevent GCC warning */ 351 /* double cast to prevent GCC warning */
364 int idx = (int)(long)data; 352 int idx = (int)(long)m->private;
365 int len = 0;
366 struct dev_rcv_lists *d; 353 struct dev_rcv_lists *d;
367 struct hlist_node *n; 354 struct hlist_node *n;
368 355
369 len += snprintf(page + len, PAGE_SIZE - len, 356 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
370 "\nreceive list '%s':\n", rx_list_name[idx]);
371 357
372 rcu_read_lock(); 358 rcu_read_lock();
373 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 359 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
374 360
375 if (!hlist_empty(&d->rx[idx])) { 361 if (!hlist_empty(&d->rx[idx])) {
376 len = can_print_recv_banner(page, len); 362 can_print_recv_banner(m);
377 len = can_print_rcvlist(page, len, &d->rx[idx], d->dev); 363 can_print_rcvlist(m, &d->rx[idx], d->dev);
378 } else 364 } else
379 len += snprintf(page + len, PAGE_SIZE - len, 365 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
380 " (%s: no entry)\n", DNAME(d->dev));
381
382 /* exit on end of buffer? */
383 if (len > PAGE_SIZE - 100)
384 break;
385 } 366 }
386 rcu_read_unlock(); 367 rcu_read_unlock();
387 368
388 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 369 seq_putc(m, '\n');
370 return 0;
371}
389 372
390 *eof = 1; 373static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
391 return len; 374{
375 return single_open(file, can_rcvlist_proc_show, PDE(inode)->data);
392} 376}
393 377
394static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off, 378static const struct file_operations can_rcvlist_proc_fops = {
395 int count, int *eof, void *data) 379 .owner = THIS_MODULE,
380 .open = can_rcvlist_proc_open,
381 .read = seq_read,
382 .llseek = seq_lseek,
383 .release = single_release,
384};
385
386static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
396{ 387{
397 int len = 0;
398 struct dev_rcv_lists *d; 388 struct dev_rcv_lists *d;
399 struct hlist_node *n; 389 struct hlist_node *n;
400 390
401 /* RX_SFF */ 391 /* RX_SFF */
402 len += snprintf(page + len, PAGE_SIZE - len, 392 seq_puts(m, "\nreceive list 'rx_sff':\n");
403 "\nreceive list 'rx_sff':\n");
404 393
405 rcu_read_lock(); 394 rcu_read_lock();
406 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 395 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
@@ -413,46 +402,38 @@ static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off,
413 } 402 }
414 403
415 if (!all_empty) { 404 if (!all_empty) {
416 len = can_print_recv_banner(page, len); 405 can_print_recv_banner(m);
417 for (i = 0; i < 0x800; i++) { 406 for (i = 0; i < 0x800; i++) {
418 if (!hlist_empty(&d->rx_sff[i]) && 407 if (!hlist_empty(&d->rx_sff[i]))
419 len < PAGE_SIZE - 100) 408 can_print_rcvlist(m, &d->rx_sff[i],
420 len = can_print_rcvlist(page, len, 409 d->dev);
421 &d->rx_sff[i],
422 d->dev);
423 } 410 }
424 } else 411 } else
425 len += snprintf(page + len, PAGE_SIZE - len, 412 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
426 " (%s: no entry)\n", DNAME(d->dev));
427
428 /* exit on end of buffer? */
429 if (len > PAGE_SIZE - 100)
430 break;
431 } 413 }
432 rcu_read_unlock(); 414 rcu_read_unlock();
433 415
434 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 416 seq_putc(m, '\n');
417 return 0;
418}
435 419
436 *eof = 1; 420static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
437 return len; 421{
422 return single_open(file, can_rcvlist_sff_proc_show, NULL);
438} 423}
439 424
425static const struct file_operations can_rcvlist_sff_proc_fops = {
426 .owner = THIS_MODULE,
427 .open = can_rcvlist_sff_proc_open,
428 .read = seq_read,
429 .llseek = seq_lseek,
430 .release = single_release,
431};
432
440/* 433/*
441 * proc utility functions 434 * proc utility functions
442 */ 435 */
443 436
444static struct proc_dir_entry *can_create_proc_readentry(const char *name,
445 mode_t mode,
446 read_proc_t *read_proc,
447 void *data)
448{
449 if (can_dir)
450 return create_proc_read_entry(name, mode, can_dir, read_proc,
451 data);
452 else
453 return NULL;
454}
455
456static void can_remove_proc_readentry(const char *name) 437static void can_remove_proc_readentry(const char *name)
457{ 438{
458 if (can_dir) 439 if (can_dir)
@@ -474,24 +455,24 @@ void can_init_proc(void)
474 } 455 }
475 456
476 /* own procfs entries from the AF_CAN core */ 457 /* own procfs entries from the AF_CAN core */
477 pde_version = can_create_proc_readentry(CAN_PROC_VERSION, 0644, 458 pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir,
478 can_proc_read_version, NULL); 459 &can_version_proc_fops);
479 pde_stats = can_create_proc_readentry(CAN_PROC_STATS, 0644, 460 pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir,
480 can_proc_read_stats, NULL); 461 &can_stats_proc_fops);
481 pde_reset_stats = can_create_proc_readentry(CAN_PROC_RESET_STATS, 0644, 462 pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir,
482 can_proc_read_reset_stats, NULL); 463 &can_reset_stats_proc_fops);
483 pde_rcvlist_err = can_create_proc_readentry(CAN_PROC_RCVLIST_ERR, 0644, 464 pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir,
484 can_proc_read_rcvlist, (void *)RX_ERR); 465 &can_rcvlist_proc_fops, (void *)RX_ERR);
485 pde_rcvlist_all = can_create_proc_readentry(CAN_PROC_RCVLIST_ALL, 0644, 466 pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir,
486 can_proc_read_rcvlist, (void *)RX_ALL); 467 &can_rcvlist_proc_fops, (void *)RX_ALL);
487 pde_rcvlist_fil = can_create_proc_readentry(CAN_PROC_RCVLIST_FIL, 0644, 468 pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir,
488 can_proc_read_rcvlist, (void *)RX_FIL); 469 &can_rcvlist_proc_fops, (void *)RX_FIL);
489 pde_rcvlist_inv = can_create_proc_readentry(CAN_PROC_RCVLIST_INV, 0644, 470 pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
490 can_proc_read_rcvlist, (void *)RX_INV); 471 &can_rcvlist_proc_fops, (void *)RX_INV);
491 pde_rcvlist_eff = can_create_proc_readentry(CAN_PROC_RCVLIST_EFF, 0644, 472 pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
492 can_proc_read_rcvlist, (void *)RX_EFF); 473 &can_rcvlist_proc_fops, (void *)RX_EFF);
493 pde_rcvlist_sff = can_create_proc_readentry(CAN_PROC_RCVLIST_SFF, 0644, 474 pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
494 can_proc_read_rcvlist_sff, NULL); 475 &can_rcvlist_sff_proc_fops);
495} 476}
496 477
497/* 478/*
diff --git a/net/can/raw.c b/net/can/raw.c
index f4cc44548bda..db3152df7d2b 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -401,6 +401,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
401 if (peer) 401 if (peer)
402 return -EOPNOTSUPP; 402 return -EOPNOTSUPP;
403 403
404 memset(addr, 0, sizeof(*addr));
404 addr->can_family = AF_CAN; 405 addr->can_family = AF_CAN;
405 addr->can_ifindex = ro->ifindex; 406 addr->can_ifindex = ro->ifindex;
406 407
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b0fe69211eef..1c6cf3a1a4f6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -55,6 +55,7 @@
55#include <net/checksum.h> 55#include <net/checksum.h>
56#include <net/sock.h> 56#include <net/sock.h>
57#include <net/tcp_states.h> 57#include <net/tcp_states.h>
58#include <trace/events/skb.h>
58 59
59/* 60/*
60 * Is a socket 'connection oriented' ? 61 * Is a socket 'connection oriented' ?
@@ -284,6 +285,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
284 int i, copy = start - offset; 285 int i, copy = start - offset;
285 struct sk_buff *frag_iter; 286 struct sk_buff *frag_iter;
286 287
288 trace_skb_copy_datagram_iovec(skb, len);
289
287 /* Copy header. */ 290 /* Copy header. */
288 if (copy > 0) { 291 if (copy > 0) {
289 if (copy > len) 292 if (copy > len)
diff --git a/net/core/dev.c b/net/core/dev.c
index a0bc087616a4..1a6561b1eb90 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -191,7 +191,6 @@ static struct list_head ptype_all __read_mostly; /* Taps */
191 * semaphore held. 191 * semaphore held.
192 */ 192 */
193DEFINE_RWLOCK(dev_base_lock); 193DEFINE_RWLOCK(dev_base_lock);
194
195EXPORT_SYMBOL(dev_base_lock); 194EXPORT_SYMBOL(dev_base_lock);
196 195
197#define NETDEV_HASHBITS 8 196#define NETDEV_HASHBITS 8
@@ -248,6 +247,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
248 */ 247 */
249 248
250DEFINE_PER_CPU(struct softnet_data, softnet_data); 249DEFINE_PER_CPU(struct softnet_data, softnet_data);
250EXPORT_PER_CPU_SYMBOL(softnet_data);
251 251
252#ifdef CONFIG_LOCKDEP 252#ifdef CONFIG_LOCKDEP
253/* 253/*
@@ -269,7 +269,7 @@ static const unsigned short netdev_lock_type[] =
269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, 270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, 271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, 272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
273 ARPHRD_VOID, ARPHRD_NONE}; 273 ARPHRD_VOID, ARPHRD_NONE};
274 274
275static const char *const netdev_lock_name[] = 275static const char *const netdev_lock_name[] =
@@ -287,7 +287,7 @@ static const char *const netdev_lock_name[] =
287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY", 290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
291 "_xmit_VOID", "_xmit_NONE"}; 291 "_xmit_VOID", "_xmit_NONE"};
292 292
293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -381,6 +381,7 @@ void dev_add_pack(struct packet_type *pt)
381 } 381 }
382 spin_unlock_bh(&ptype_lock); 382 spin_unlock_bh(&ptype_lock);
383} 383}
384EXPORT_SYMBOL(dev_add_pack);
384 385
385/** 386/**
386 * __dev_remove_pack - remove packet handler 387 * __dev_remove_pack - remove packet handler
@@ -418,6 +419,8 @@ void __dev_remove_pack(struct packet_type *pt)
418out: 419out:
419 spin_unlock_bh(&ptype_lock); 420 spin_unlock_bh(&ptype_lock);
420} 421}
422EXPORT_SYMBOL(__dev_remove_pack);
423
421/** 424/**
422 * dev_remove_pack - remove packet handler 425 * dev_remove_pack - remove packet handler
423 * @pt: packet type declaration 426 * @pt: packet type declaration
@@ -436,6 +439,7 @@ void dev_remove_pack(struct packet_type *pt)
436 439
437 synchronize_net(); 440 synchronize_net();
438} 441}
442EXPORT_SYMBOL(dev_remove_pack);
439 443
440/****************************************************************************** 444/******************************************************************************
441 445
@@ -499,6 +503,7 @@ int netdev_boot_setup_check(struct net_device *dev)
499 } 503 }
500 return 0; 504 return 0;
501} 505}
506EXPORT_SYMBOL(netdev_boot_setup_check);
502 507
503 508
504/** 509/**
@@ -591,6 +596,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
591 } 596 }
592 return NULL; 597 return NULL;
593} 598}
599EXPORT_SYMBOL(__dev_get_by_name);
594 600
595/** 601/**
596 * dev_get_by_name - find a device by its name 602 * dev_get_by_name - find a device by its name
@@ -615,6 +621,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
615 read_unlock(&dev_base_lock); 621 read_unlock(&dev_base_lock);
616 return dev; 622 return dev;
617} 623}
624EXPORT_SYMBOL(dev_get_by_name);
618 625
619/** 626/**
620 * __dev_get_by_index - find a device by its ifindex 627 * __dev_get_by_index - find a device by its ifindex
@@ -640,6 +647,7 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex)
640 } 647 }
641 return NULL; 648 return NULL;
642} 649}
650EXPORT_SYMBOL(__dev_get_by_index);
643 651
644 652
645/** 653/**
@@ -664,6 +672,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
664 read_unlock(&dev_base_lock); 672 read_unlock(&dev_base_lock);
665 return dev; 673 return dev;
666} 674}
675EXPORT_SYMBOL(dev_get_by_index);
667 676
668/** 677/**
669 * dev_getbyhwaddr - find a device by its hardware address 678 * dev_getbyhwaddr - find a device by its hardware address
@@ -693,7 +702,6 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h
693 702
694 return NULL; 703 return NULL;
695} 704}
696
697EXPORT_SYMBOL(dev_getbyhwaddr); 705EXPORT_SYMBOL(dev_getbyhwaddr);
698 706
699struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 707struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -707,7 +715,6 @@ struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
707 715
708 return NULL; 716 return NULL;
709} 717}
710
711EXPORT_SYMBOL(__dev_getfirstbyhwtype); 718EXPORT_SYMBOL(__dev_getfirstbyhwtype);
712 719
713struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 720struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -721,7 +728,6 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
721 rtnl_unlock(); 728 rtnl_unlock();
722 return dev; 729 return dev;
723} 730}
724
725EXPORT_SYMBOL(dev_getfirstbyhwtype); 731EXPORT_SYMBOL(dev_getfirstbyhwtype);
726 732
727/** 733/**
@@ -736,7 +742,8 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
736 * dev_put to indicate they have finished with it. 742 * dev_put to indicate they have finished with it.
737 */ 743 */
738 744
739struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) 745struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
746 unsigned short mask)
740{ 747{
741 struct net_device *dev, *ret; 748 struct net_device *dev, *ret;
742 749
@@ -752,6 +759,7 @@ struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, u
752 read_unlock(&dev_base_lock); 759 read_unlock(&dev_base_lock);
753 return ret; 760 return ret;
754} 761}
762EXPORT_SYMBOL(dev_get_by_flags);
755 763
756/** 764/**
757 * dev_valid_name - check if name is okay for network device 765 * dev_valid_name - check if name is okay for network device
@@ -777,6 +785,7 @@ int dev_valid_name(const char *name)
777 } 785 }
778 return 1; 786 return 1;
779} 787}
788EXPORT_SYMBOL(dev_valid_name);
780 789
781/** 790/**
782 * __dev_alloc_name - allocate a name for a device 791 * __dev_alloc_name - allocate a name for a device
@@ -870,6 +879,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
870 strlcpy(dev->name, buf, IFNAMSIZ); 879 strlcpy(dev->name, buf, IFNAMSIZ);
871 return ret; 880 return ret;
872} 881}
882EXPORT_SYMBOL(dev_alloc_name);
873 883
874 884
875/** 885/**
@@ -906,8 +916,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
906 err = dev_alloc_name(dev, newname); 916 err = dev_alloc_name(dev, newname);
907 if (err < 0) 917 if (err < 0)
908 return err; 918 return err;
909 } 919 } else if (__dev_get_by_name(net, newname))
910 else if (__dev_get_by_name(net, newname))
911 return -EEXIST; 920 return -EEXIST;
912 else 921 else
913 strlcpy(dev->name, newname, IFNAMSIZ); 922 strlcpy(dev->name, newname, IFNAMSIZ);
@@ -970,7 +979,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
970 return 0; 979 return 0;
971 } 980 }
972 981
973 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL); 982 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
974 if (!dev->ifalias) 983 if (!dev->ifalias)
975 return -ENOMEM; 984 return -ENOMEM;
976 985
@@ -1006,6 +1015,7 @@ void netdev_state_change(struct net_device *dev)
1006 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1015 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1007 } 1016 }
1008} 1017}
1018EXPORT_SYMBOL(netdev_state_change);
1009 1019
1010void netdev_bonding_change(struct net_device *dev) 1020void netdev_bonding_change(struct net_device *dev)
1011{ 1021{
@@ -1034,6 +1044,7 @@ void dev_load(struct net *net, const char *name)
1034 if (!dev && capable(CAP_SYS_MODULE)) 1044 if (!dev && capable(CAP_SYS_MODULE))
1035 request_module("%s", name); 1045 request_module("%s", name);
1036} 1046}
1047EXPORT_SYMBOL(dev_load);
1037 1048
1038/** 1049/**
1039 * dev_open - prepare an interface for use. 1050 * dev_open - prepare an interface for use.
@@ -1118,6 +1129,7 @@ int dev_open(struct net_device *dev)
1118 1129
1119 return ret; 1130 return ret;
1120} 1131}
1132EXPORT_SYMBOL(dev_open);
1121 1133
1122/** 1134/**
1123 * dev_close - shutdown an interface. 1135 * dev_close - shutdown an interface.
@@ -1184,6 +1196,7 @@ int dev_close(struct net_device *dev)
1184 1196
1185 return 0; 1197 return 0;
1186} 1198}
1199EXPORT_SYMBOL(dev_close);
1187 1200
1188 1201
1189/** 1202/**
@@ -1279,6 +1292,7 @@ rollback:
1279 raw_notifier_chain_unregister(&netdev_chain, nb); 1292 raw_notifier_chain_unregister(&netdev_chain, nb);
1280 goto unlock; 1293 goto unlock;
1281} 1294}
1295EXPORT_SYMBOL(register_netdevice_notifier);
1282 1296
1283/** 1297/**
1284 * unregister_netdevice_notifier - unregister a network notifier block 1298 * unregister_netdevice_notifier - unregister a network notifier block
@@ -1299,6 +1313,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1299 rtnl_unlock(); 1313 rtnl_unlock();
1300 return err; 1314 return err;
1301} 1315}
1316EXPORT_SYMBOL(unregister_netdevice_notifier);
1302 1317
1303/** 1318/**
1304 * call_netdevice_notifiers - call all network notifier blocks 1319 * call_netdevice_notifiers - call all network notifier blocks
@@ -1321,11 +1336,13 @@ void net_enable_timestamp(void)
1321{ 1336{
1322 atomic_inc(&netstamp_needed); 1337 atomic_inc(&netstamp_needed);
1323} 1338}
1339EXPORT_SYMBOL(net_enable_timestamp);
1324 1340
1325void net_disable_timestamp(void) 1341void net_disable_timestamp(void)
1326{ 1342{
1327 atomic_dec(&netstamp_needed); 1343 atomic_dec(&netstamp_needed);
1328} 1344}
1345EXPORT_SYMBOL(net_disable_timestamp);
1329 1346
1330static inline void net_timestamp(struct sk_buff *skb) 1347static inline void net_timestamp(struct sk_buff *skb)
1331{ 1348{
@@ -1359,7 +1376,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1359 if ((ptype->dev == dev || !ptype->dev) && 1376 if ((ptype->dev == dev || !ptype->dev) &&
1360 (ptype->af_packet_priv == NULL || 1377 (ptype->af_packet_priv == NULL ||
1361 (struct sock *)ptype->af_packet_priv != skb->sk)) { 1378 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1362 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); 1379 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1363 if (!skb2) 1380 if (!skb2)
1364 break; 1381 break;
1365 1382
@@ -1527,6 +1544,7 @@ out_set_summed:
1527out: 1544out:
1528 return ret; 1545 return ret;
1529} 1546}
1547EXPORT_SYMBOL(skb_checksum_help);
1530 1548
1531/** 1549/**
1532 * skb_gso_segment - Perform segmentation on skb. 1550 * skb_gso_segment - Perform segmentation on skb.
@@ -1589,7 +1607,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1589 1607
1590 return segs; 1608 return segs;
1591} 1609}
1592
1593EXPORT_SYMBOL(skb_gso_segment); 1610EXPORT_SYMBOL(skb_gso_segment);
1594 1611
1595/* Take action when hardware reception checksum errors are detected. */ 1612/* Take action when hardware reception checksum errors are detected. */
@@ -1755,7 +1772,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1755 1772
1756 if (skb_rx_queue_recorded(skb)) { 1773 if (skb_rx_queue_recorded(skb)) {
1757 hash = skb_get_rx_queue(skb); 1774 hash = skb_get_rx_queue(skb);
1758 while (unlikely (hash >= dev->real_num_tx_queues)) 1775 while (unlikely(hash >= dev->real_num_tx_queues))
1759 hash -= dev->real_num_tx_queues; 1776 hash -= dev->real_num_tx_queues;
1760 return hash; 1777 return hash;
1761 } 1778 }
@@ -1890,7 +1907,7 @@ gso:
1890 q = rcu_dereference(txq->qdisc); 1907 q = rcu_dereference(txq->qdisc);
1891 1908
1892#ifdef CONFIG_NET_CLS_ACT 1909#ifdef CONFIG_NET_CLS_ACT
1893 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); 1910 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1894#endif 1911#endif
1895 if (q->enqueue) { 1912 if (q->enqueue) {
1896 rc = __dev_xmit_skb(skb, q, dev, txq); 1913 rc = __dev_xmit_skb(skb, q, dev, txq);
@@ -1917,7 +1934,7 @@ gso:
1917 HARD_TX_LOCK(dev, txq, cpu); 1934 HARD_TX_LOCK(dev, txq, cpu);
1918 1935
1919 if (!netif_tx_queue_stopped(txq)) { 1936 if (!netif_tx_queue_stopped(txq)) {
1920 rc = 0; 1937 rc = NET_XMIT_SUCCESS;
1921 if (!dev_hard_start_xmit(skb, dev, txq)) { 1938 if (!dev_hard_start_xmit(skb, dev, txq)) {
1922 HARD_TX_UNLOCK(dev, txq); 1939 HARD_TX_UNLOCK(dev, txq);
1923 goto out; 1940 goto out;
@@ -1946,6 +1963,7 @@ out:
1946 rcu_read_unlock_bh(); 1963 rcu_read_unlock_bh();
1947 return rc; 1964 return rc;
1948} 1965}
1966EXPORT_SYMBOL(dev_queue_xmit);
1949 1967
1950 1968
1951/*======================================================================= 1969/*=======================================================================
@@ -2012,6 +2030,7 @@ enqueue:
2012 kfree_skb(skb); 2030 kfree_skb(skb);
2013 return NET_RX_DROP; 2031 return NET_RX_DROP;
2014} 2032}
2033EXPORT_SYMBOL(netif_rx);
2015 2034
2016int netif_rx_ni(struct sk_buff *skb) 2035int netif_rx_ni(struct sk_buff *skb)
2017{ 2036{
@@ -2025,7 +2044,6 @@ int netif_rx_ni(struct sk_buff *skb)
2025 2044
2026 return err; 2045 return err;
2027} 2046}
2028
2029EXPORT_SYMBOL(netif_rx_ni); 2047EXPORT_SYMBOL(netif_rx_ni);
2030 2048
2031static void net_tx_action(struct softirq_action *h) 2049static void net_tx_action(struct softirq_action *h)
@@ -2358,6 +2376,7 @@ out:
2358 rcu_read_unlock(); 2376 rcu_read_unlock();
2359 return ret; 2377 return ret;
2360} 2378}
2379EXPORT_SYMBOL(netif_receive_skb);
2361 2380
2362/* Network device is going away, flush any packets still pending */ 2381/* Network device is going away, flush any packets still pending */
2363static void flush_backlog(void *arg) 2382static void flush_backlog(void *arg)
@@ -2874,7 +2893,7 @@ softnet_break:
2874 goto out; 2893 goto out;
2875} 2894}
2876 2895
2877static gifconf_func_t * gifconf_list [NPROTO]; 2896static gifconf_func_t *gifconf_list[NPROTO];
2878 2897
2879/** 2898/**
2880 * register_gifconf - register a SIOCGIF handler 2899 * register_gifconf - register a SIOCGIF handler
@@ -2885,13 +2904,14 @@ static gifconf_func_t * gifconf_list [NPROTO];
2885 * that is passed must not be freed or reused until it has been replaced 2904 * that is passed must not be freed or reused until it has been replaced
2886 * by another handler. 2905 * by another handler.
2887 */ 2906 */
2888int register_gifconf(unsigned int family, gifconf_func_t * gifconf) 2907int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2889{ 2908{
2890 if (family >= NPROTO) 2909 if (family >= NPROTO)
2891 return -EINVAL; 2910 return -EINVAL;
2892 gifconf_list[family] = gifconf; 2911 gifconf_list[family] = gifconf;
2893 return 0; 2912 return 0;
2894} 2913}
2914EXPORT_SYMBOL(register_gifconf);
2895 2915
2896 2916
2897/* 2917/*
@@ -3102,7 +3122,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
3102 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 3122 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3103 s->total, s->dropped, s->time_squeeze, 0, 3123 s->total, s->dropped, s->time_squeeze, 0,
3104 0, 0, 0, 0, /* was fastroute */ 3124 0, 0, 0, 0, /* was fastroute */
3105 s->cpu_collision ); 3125 s->cpu_collision);
3106 return 0; 3126 return 0;
3107} 3127}
3108 3128
@@ -3338,6 +3358,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
3338 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); 3358 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3339 return 0; 3359 return 0;
3340} 3360}
3361EXPORT_SYMBOL(netdev_set_master);
3341 3362
3342static void dev_change_rx_flags(struct net_device *dev, int flags) 3363static void dev_change_rx_flags(struct net_device *dev, int flags)
3343{ 3364{
@@ -3416,6 +3437,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
3416 dev_set_rx_mode(dev); 3437 dev_set_rx_mode(dev);
3417 return err; 3438 return err;
3418} 3439}
3440EXPORT_SYMBOL(dev_set_promiscuity);
3419 3441
3420/** 3442/**
3421 * dev_set_allmulti - update allmulti count on a device 3443 * dev_set_allmulti - update allmulti count on a device
@@ -3459,6 +3481,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
3459 } 3481 }
3460 return 0; 3482 return 0;
3461} 3483}
3484EXPORT_SYMBOL(dev_set_allmulti);
3462 3485
3463/* 3486/*
3464 * Upload unicast and multicast address lists to device and 3487 * Upload unicast and multicast address lists to device and
@@ -3887,10 +3910,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
3887 3910
3888 ASSERT_RTNL(); 3911 ASSERT_RTNL();
3889 3912
3913 netif_addr_lock_bh(dev);
3890 err = __hw_addr_del(&dev->uc, addr, dev->addr_len, 3914 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3891 NETDEV_HW_ADDR_T_UNICAST); 3915 NETDEV_HW_ADDR_T_UNICAST);
3892 if (!err) 3916 if (!err)
3893 __dev_set_rx_mode(dev); 3917 __dev_set_rx_mode(dev);
3918 netif_addr_unlock_bh(dev);
3894 return err; 3919 return err;
3895} 3920}
3896EXPORT_SYMBOL(dev_unicast_delete); 3921EXPORT_SYMBOL(dev_unicast_delete);
@@ -3911,10 +3936,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
3911 3936
3912 ASSERT_RTNL(); 3937 ASSERT_RTNL();
3913 3938
3939 netif_addr_lock_bh(dev);
3914 err = __hw_addr_add(&dev->uc, addr, dev->addr_len, 3940 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3915 NETDEV_HW_ADDR_T_UNICAST); 3941 NETDEV_HW_ADDR_T_UNICAST);
3916 if (!err) 3942 if (!err)
3917 __dev_set_rx_mode(dev); 3943 __dev_set_rx_mode(dev);
3944 netif_addr_unlock_bh(dev);
3918 return err; 3945 return err;
3919} 3946}
3920EXPORT_SYMBOL(dev_unicast_add); 3947EXPORT_SYMBOL(dev_unicast_add);
@@ -3973,7 +4000,8 @@ EXPORT_SYMBOL_GPL(__dev_addr_unsync);
3973 * @from: source device 4000 * @from: source device
3974 * 4001 *
3975 * Add newly added addresses to the destination device and release 4002 * Add newly added addresses to the destination device and release
3976 * addresses that have no users left. 4003 * addresses that have no users left. The source device must be
4004 * locked by netif_tx_lock_bh.
3977 * 4005 *
3978 * This function is intended to be called from the dev->set_rx_mode 4006 * This function is intended to be called from the dev->set_rx_mode
3979 * function of layered software devices. 4007 * function of layered software devices.
@@ -3982,14 +4010,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
3982{ 4010{
3983 int err = 0; 4011 int err = 0;
3984 4012
3985 ASSERT_RTNL();
3986
3987 if (to->addr_len != from->addr_len) 4013 if (to->addr_len != from->addr_len)
3988 return -EINVAL; 4014 return -EINVAL;
3989 4015
4016 netif_addr_lock_bh(to);
3990 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); 4017 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
3991 if (!err) 4018 if (!err)
3992 __dev_set_rx_mode(to); 4019 __dev_set_rx_mode(to);
4020 netif_addr_unlock_bh(to);
3993 return err; 4021 return err;
3994} 4022}
3995EXPORT_SYMBOL(dev_unicast_sync); 4023EXPORT_SYMBOL(dev_unicast_sync);
@@ -4005,27 +4033,27 @@ EXPORT_SYMBOL(dev_unicast_sync);
4005 */ 4033 */
4006void dev_unicast_unsync(struct net_device *to, struct net_device *from) 4034void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4007{ 4035{
4008 ASSERT_RTNL();
4009
4010 if (to->addr_len != from->addr_len) 4036 if (to->addr_len != from->addr_len)
4011 return; 4037 return;
4012 4038
4039 netif_addr_lock_bh(from);
4040 netif_addr_lock(to);
4013 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); 4041 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4014 __dev_set_rx_mode(to); 4042 __dev_set_rx_mode(to);
4043 netif_addr_unlock(to);
4044 netif_addr_unlock_bh(from);
4015} 4045}
4016EXPORT_SYMBOL(dev_unicast_unsync); 4046EXPORT_SYMBOL(dev_unicast_unsync);
4017 4047
4018static void dev_unicast_flush(struct net_device *dev) 4048static void dev_unicast_flush(struct net_device *dev)
4019{ 4049{
4020 /* rtnl_mutex must be held here */ 4050 netif_addr_lock_bh(dev);
4021
4022 __hw_addr_flush(&dev->uc); 4051 __hw_addr_flush(&dev->uc);
4052 netif_addr_unlock_bh(dev);
4023} 4053}
4024 4054
4025static void dev_unicast_init(struct net_device *dev) 4055static void dev_unicast_init(struct net_device *dev)
4026{ 4056{
4027 /* rtnl_mutex must be held here */
4028
4029 __hw_addr_init(&dev->uc); 4057 __hw_addr_init(&dev->uc);
4030} 4058}
4031 4059
@@ -4083,6 +4111,7 @@ unsigned dev_get_flags(const struct net_device *dev)
4083 4111
4084 return flags; 4112 return flags;
4085} 4113}
4114EXPORT_SYMBOL(dev_get_flags);
4086 4115
4087/** 4116/**
4088 * dev_change_flags - change device settings 4117 * dev_change_flags - change device settings
@@ -4133,12 +4162,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4133 } 4162 }
4134 4163
4135 if (dev->flags & IFF_UP && 4164 if (dev->flags & IFF_UP &&
4136 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 4165 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4137 IFF_VOLATILE))) 4166 IFF_VOLATILE)))
4138 call_netdevice_notifiers(NETDEV_CHANGE, dev); 4167 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4139 4168
4140 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4169 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4141 int inc = (flags & IFF_PROMISC) ? +1 : -1; 4170 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4171
4142 dev->gflags ^= IFF_PROMISC; 4172 dev->gflags ^= IFF_PROMISC;
4143 dev_set_promiscuity(dev, inc); 4173 dev_set_promiscuity(dev, inc);
4144 } 4174 }
@@ -4148,7 +4178,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4148 IFF_ALLMULTI is requested not asking us and not reporting. 4178 IFF_ALLMULTI is requested not asking us and not reporting.
4149 */ 4179 */
4150 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 4180 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4151 int inc = (flags & IFF_ALLMULTI) ? +1 : -1; 4181 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4182
4152 dev->gflags ^= IFF_ALLMULTI; 4183 dev->gflags ^= IFF_ALLMULTI;
4153 dev_set_allmulti(dev, inc); 4184 dev_set_allmulti(dev, inc);
4154 } 4185 }
@@ -4160,6 +4191,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4160 4191
4161 return ret; 4192 return ret;
4162} 4193}
4194EXPORT_SYMBOL(dev_change_flags);
4163 4195
4164/** 4196/**
4165 * dev_set_mtu - Change maximum transfer unit 4197 * dev_set_mtu - Change maximum transfer unit
@@ -4193,6 +4225,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
4193 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4225 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4194 return err; 4226 return err;
4195} 4227}
4228EXPORT_SYMBOL(dev_set_mtu);
4196 4229
4197/** 4230/**
4198 * dev_set_mac_address - Change Media Access Control Address 4231 * dev_set_mac_address - Change Media Access Control Address
@@ -4217,6 +4250,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4217 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4250 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4218 return err; 4251 return err;
4219} 4252}
4253EXPORT_SYMBOL(dev_set_mac_address);
4220 4254
4221/* 4255/*
4222 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) 4256 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
@@ -4230,56 +4264,56 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
4230 return -ENODEV; 4264 return -ENODEV;
4231 4265
4232 switch (cmd) { 4266 switch (cmd) {
4233 case SIOCGIFFLAGS: /* Get interface flags */ 4267 case SIOCGIFFLAGS: /* Get interface flags */
4234 ifr->ifr_flags = (short) dev_get_flags(dev); 4268 ifr->ifr_flags = (short) dev_get_flags(dev);
4235 return 0; 4269 return 0;
4236 4270
4237 case SIOCGIFMETRIC: /* Get the metric on the interface 4271 case SIOCGIFMETRIC: /* Get the metric on the interface
4238 (currently unused) */ 4272 (currently unused) */
4239 ifr->ifr_metric = 0; 4273 ifr->ifr_metric = 0;
4240 return 0; 4274 return 0;
4241 4275
4242 case SIOCGIFMTU: /* Get the MTU of a device */ 4276 case SIOCGIFMTU: /* Get the MTU of a device */
4243 ifr->ifr_mtu = dev->mtu; 4277 ifr->ifr_mtu = dev->mtu;
4244 return 0; 4278 return 0;
4245 4279
4246 case SIOCGIFHWADDR: 4280 case SIOCGIFHWADDR:
4247 if (!dev->addr_len) 4281 if (!dev->addr_len)
4248 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); 4282 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4249 else 4283 else
4250 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, 4284 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4251 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4285 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4252 ifr->ifr_hwaddr.sa_family = dev->type; 4286 ifr->ifr_hwaddr.sa_family = dev->type;
4253 return 0; 4287 return 0;
4254 4288
4255 case SIOCGIFSLAVE: 4289 case SIOCGIFSLAVE:
4256 err = -EINVAL; 4290 err = -EINVAL;
4257 break; 4291 break;
4258 4292
4259 case SIOCGIFMAP: 4293 case SIOCGIFMAP:
4260 ifr->ifr_map.mem_start = dev->mem_start; 4294 ifr->ifr_map.mem_start = dev->mem_start;
4261 ifr->ifr_map.mem_end = dev->mem_end; 4295 ifr->ifr_map.mem_end = dev->mem_end;
4262 ifr->ifr_map.base_addr = dev->base_addr; 4296 ifr->ifr_map.base_addr = dev->base_addr;
4263 ifr->ifr_map.irq = dev->irq; 4297 ifr->ifr_map.irq = dev->irq;
4264 ifr->ifr_map.dma = dev->dma; 4298 ifr->ifr_map.dma = dev->dma;
4265 ifr->ifr_map.port = dev->if_port; 4299 ifr->ifr_map.port = dev->if_port;
4266 return 0; 4300 return 0;
4267 4301
4268 case SIOCGIFINDEX: 4302 case SIOCGIFINDEX:
4269 ifr->ifr_ifindex = dev->ifindex; 4303 ifr->ifr_ifindex = dev->ifindex;
4270 return 0; 4304 return 0;
4271 4305
4272 case SIOCGIFTXQLEN: 4306 case SIOCGIFTXQLEN:
4273 ifr->ifr_qlen = dev->tx_queue_len; 4307 ifr->ifr_qlen = dev->tx_queue_len;
4274 return 0; 4308 return 0;
4275 4309
4276 default: 4310 default:
4277 /* dev_ioctl() should ensure this case 4311 /* dev_ioctl() should ensure this case
4278 * is never reached 4312 * is never reached
4279 */ 4313 */
4280 WARN_ON(1); 4314 WARN_ON(1);
4281 err = -EINVAL; 4315 err = -EINVAL;
4282 break; 4316 break;
4283 4317
4284 } 4318 }
4285 return err; 4319 return err;
@@ -4300,92 +4334,91 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4300 ops = dev->netdev_ops; 4334 ops = dev->netdev_ops;
4301 4335
4302 switch (cmd) { 4336 switch (cmd) {
4303 case SIOCSIFFLAGS: /* Set interface flags */ 4337 case SIOCSIFFLAGS: /* Set interface flags */
4304 return dev_change_flags(dev, ifr->ifr_flags); 4338 return dev_change_flags(dev, ifr->ifr_flags);
4305
4306 case SIOCSIFMETRIC: /* Set the metric on the interface
4307 (currently unused) */
4308 return -EOPNOTSUPP;
4309 4339
4310 case SIOCSIFMTU: /* Set the MTU of a device */ 4340 case SIOCSIFMETRIC: /* Set the metric on the interface
4311 return dev_set_mtu(dev, ifr->ifr_mtu); 4341 (currently unused) */
4342 return -EOPNOTSUPP;
4312 4343
4313 case SIOCSIFHWADDR: 4344 case SIOCSIFMTU: /* Set the MTU of a device */
4314 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 4345 return dev_set_mtu(dev, ifr->ifr_mtu);
4315 4346
4316 case SIOCSIFHWBROADCAST: 4347 case SIOCSIFHWADDR:
4317 if (ifr->ifr_hwaddr.sa_family != dev->type) 4348 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4318 return -EINVAL;
4319 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4320 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4321 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4322 return 0;
4323
4324 case SIOCSIFMAP:
4325 if (ops->ndo_set_config) {
4326 if (!netif_device_present(dev))
4327 return -ENODEV;
4328 return ops->ndo_set_config(dev, &ifr->ifr_map);
4329 }
4330 return -EOPNOTSUPP;
4331 4349
4332 case SIOCADDMULTI: 4350 case SIOCSIFHWBROADCAST:
4333 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 4351 if (ifr->ifr_hwaddr.sa_family != dev->type)
4334 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 4352 return -EINVAL;
4335 return -EINVAL; 4353 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4336 if (!netif_device_present(dev)) 4354 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4337 return -ENODEV; 4355 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4338 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, 4356 return 0;
4339 dev->addr_len, 1);
4340 4357
4341 case SIOCDELMULTI: 4358 case SIOCSIFMAP:
4342 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 4359 if (ops->ndo_set_config) {
4343 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4344 return -EINVAL;
4345 if (!netif_device_present(dev)) 4360 if (!netif_device_present(dev))
4346 return -ENODEV; 4361 return -ENODEV;
4347 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, 4362 return ops->ndo_set_config(dev, &ifr->ifr_map);
4348 dev->addr_len, 1); 4363 }
4364 return -EOPNOTSUPP;
4349 4365
4350 case SIOCSIFTXQLEN: 4366 case SIOCADDMULTI:
4351 if (ifr->ifr_qlen < 0) 4367 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4352 return -EINVAL; 4368 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4353 dev->tx_queue_len = ifr->ifr_qlen; 4369 return -EINVAL;
4354 return 0; 4370 if (!netif_device_present(dev))
4371 return -ENODEV;
4372 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4373 dev->addr_len, 1);
4374
4375 case SIOCDELMULTI:
4376 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4377 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4378 return -EINVAL;
4379 if (!netif_device_present(dev))
4380 return -ENODEV;
4381 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4382 dev->addr_len, 1);
4355 4383
4356 case SIOCSIFNAME: 4384 case SIOCSIFTXQLEN:
4357 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 4385 if (ifr->ifr_qlen < 0)
4358 return dev_change_name(dev, ifr->ifr_newname); 4386 return -EINVAL;
4387 dev->tx_queue_len = ifr->ifr_qlen;
4388 return 0;
4359 4389
4360 /* 4390 case SIOCSIFNAME:
4361 * Unknown or private ioctl 4391 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4362 */ 4392 return dev_change_name(dev, ifr->ifr_newname);
4363 4393
4364 default: 4394 /*
4365 if ((cmd >= SIOCDEVPRIVATE && 4395 * Unknown or private ioctl
4366 cmd <= SIOCDEVPRIVATE + 15) || 4396 */
4367 cmd == SIOCBONDENSLAVE || 4397 default:
4368 cmd == SIOCBONDRELEASE || 4398 if ((cmd >= SIOCDEVPRIVATE &&
4369 cmd == SIOCBONDSETHWADDR || 4399 cmd <= SIOCDEVPRIVATE + 15) ||
4370 cmd == SIOCBONDSLAVEINFOQUERY || 4400 cmd == SIOCBONDENSLAVE ||
4371 cmd == SIOCBONDINFOQUERY || 4401 cmd == SIOCBONDRELEASE ||
4372 cmd == SIOCBONDCHANGEACTIVE || 4402 cmd == SIOCBONDSETHWADDR ||
4373 cmd == SIOCGMIIPHY || 4403 cmd == SIOCBONDSLAVEINFOQUERY ||
4374 cmd == SIOCGMIIREG || 4404 cmd == SIOCBONDINFOQUERY ||
4375 cmd == SIOCSMIIREG || 4405 cmd == SIOCBONDCHANGEACTIVE ||
4376 cmd == SIOCBRADDIF || 4406 cmd == SIOCGMIIPHY ||
4377 cmd == SIOCBRDELIF || 4407 cmd == SIOCGMIIREG ||
4378 cmd == SIOCSHWTSTAMP || 4408 cmd == SIOCSMIIREG ||
4379 cmd == SIOCWANDEV) { 4409 cmd == SIOCBRADDIF ||
4380 err = -EOPNOTSUPP; 4410 cmd == SIOCBRDELIF ||
4381 if (ops->ndo_do_ioctl) { 4411 cmd == SIOCSHWTSTAMP ||
4382 if (netif_device_present(dev)) 4412 cmd == SIOCWANDEV) {
4383 err = ops->ndo_do_ioctl(dev, ifr, cmd); 4413 err = -EOPNOTSUPP;
4384 else 4414 if (ops->ndo_do_ioctl) {
4385 err = -ENODEV; 4415 if (netif_device_present(dev))
4386 } 4416 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4387 } else 4417 else
4388 err = -EINVAL; 4418 err = -ENODEV;
4419 }
4420 } else
4421 err = -EINVAL;
4389 4422
4390 } 4423 }
4391 return err; 4424 return err;
@@ -4442,135 +4475,135 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4442 */ 4475 */
4443 4476
4444 switch (cmd) { 4477 switch (cmd) {
4445 /* 4478 /*
4446 * These ioctl calls: 4479 * These ioctl calls:
4447 * - can be done by all. 4480 * - can be done by all.
4448 * - atomic and do not require locking. 4481 * - atomic and do not require locking.
4449 * - return a value 4482 * - return a value
4450 */ 4483 */
4451 case SIOCGIFFLAGS: 4484 case SIOCGIFFLAGS:
4452 case SIOCGIFMETRIC: 4485 case SIOCGIFMETRIC:
4453 case SIOCGIFMTU: 4486 case SIOCGIFMTU:
4454 case SIOCGIFHWADDR: 4487 case SIOCGIFHWADDR:
4455 case SIOCGIFSLAVE: 4488 case SIOCGIFSLAVE:
4456 case SIOCGIFMAP: 4489 case SIOCGIFMAP:
4457 case SIOCGIFINDEX: 4490 case SIOCGIFINDEX:
4458 case SIOCGIFTXQLEN: 4491 case SIOCGIFTXQLEN:
4459 dev_load(net, ifr.ifr_name); 4492 dev_load(net, ifr.ifr_name);
4460 read_lock(&dev_base_lock); 4493 read_lock(&dev_base_lock);
4461 ret = dev_ifsioc_locked(net, &ifr, cmd); 4494 ret = dev_ifsioc_locked(net, &ifr, cmd);
4462 read_unlock(&dev_base_lock); 4495 read_unlock(&dev_base_lock);
4463 if (!ret) { 4496 if (!ret) {
4464 if (colon) 4497 if (colon)
4465 *colon = ':'; 4498 *colon = ':';
4466 if (copy_to_user(arg, &ifr, 4499 if (copy_to_user(arg, &ifr,
4467 sizeof(struct ifreq))) 4500 sizeof(struct ifreq)))
4468 ret = -EFAULT; 4501 ret = -EFAULT;
4469 } 4502 }
4470 return ret; 4503 return ret;
4471 4504
4472 case SIOCETHTOOL: 4505 case SIOCETHTOOL:
4473 dev_load(net, ifr.ifr_name); 4506 dev_load(net, ifr.ifr_name);
4474 rtnl_lock(); 4507 rtnl_lock();
4475 ret = dev_ethtool(net, &ifr); 4508 ret = dev_ethtool(net, &ifr);
4476 rtnl_unlock(); 4509 rtnl_unlock();
4477 if (!ret) { 4510 if (!ret) {
4478 if (colon) 4511 if (colon)
4479 *colon = ':'; 4512 *colon = ':';
4480 if (copy_to_user(arg, &ifr, 4513 if (copy_to_user(arg, &ifr,
4481 sizeof(struct ifreq))) 4514 sizeof(struct ifreq)))
4482 ret = -EFAULT; 4515 ret = -EFAULT;
4483 } 4516 }
4484 return ret; 4517 return ret;
4485 4518
4486 /* 4519 /*
4487 * These ioctl calls: 4520 * These ioctl calls:
4488 * - require superuser power. 4521 * - require superuser power.
4489 * - require strict serialization. 4522 * - require strict serialization.
4490 * - return a value 4523 * - return a value
4491 */ 4524 */
4492 case SIOCGMIIPHY: 4525 case SIOCGMIIPHY:
4493 case SIOCGMIIREG: 4526 case SIOCGMIIREG:
4494 case SIOCSIFNAME: 4527 case SIOCSIFNAME:
4495 if (!capable(CAP_NET_ADMIN)) 4528 if (!capable(CAP_NET_ADMIN))
4496 return -EPERM; 4529 return -EPERM;
4497 dev_load(net, ifr.ifr_name); 4530 dev_load(net, ifr.ifr_name);
4498 rtnl_lock(); 4531 rtnl_lock();
4499 ret = dev_ifsioc(net, &ifr, cmd); 4532 ret = dev_ifsioc(net, &ifr, cmd);
4500 rtnl_unlock(); 4533 rtnl_unlock();
4501 if (!ret) { 4534 if (!ret) {
4502 if (colon) 4535 if (colon)
4503 *colon = ':'; 4536 *colon = ':';
4504 if (copy_to_user(arg, &ifr, 4537 if (copy_to_user(arg, &ifr,
4505 sizeof(struct ifreq))) 4538 sizeof(struct ifreq)))
4506 ret = -EFAULT; 4539 ret = -EFAULT;
4507 } 4540 }
4508 return ret; 4541 return ret;
4509 4542
4510 /* 4543 /*
4511 * These ioctl calls: 4544 * These ioctl calls:
4512 * - require superuser power. 4545 * - require superuser power.
4513 * - require strict serialization. 4546 * - require strict serialization.
4514 * - do not return a value 4547 * - do not return a value
4515 */ 4548 */
4516 case SIOCSIFFLAGS: 4549 case SIOCSIFFLAGS:
4517 case SIOCSIFMETRIC: 4550 case SIOCSIFMETRIC:
4518 case SIOCSIFMTU: 4551 case SIOCSIFMTU:
4519 case SIOCSIFMAP: 4552 case SIOCSIFMAP:
4520 case SIOCSIFHWADDR: 4553 case SIOCSIFHWADDR:
4521 case SIOCSIFSLAVE: 4554 case SIOCSIFSLAVE:
4522 case SIOCADDMULTI: 4555 case SIOCADDMULTI:
4523 case SIOCDELMULTI: 4556 case SIOCDELMULTI:
4524 case SIOCSIFHWBROADCAST: 4557 case SIOCSIFHWBROADCAST:
4525 case SIOCSIFTXQLEN: 4558 case SIOCSIFTXQLEN:
4526 case SIOCSMIIREG: 4559 case SIOCSMIIREG:
4527 case SIOCBONDENSLAVE: 4560 case SIOCBONDENSLAVE:
4528 case SIOCBONDRELEASE: 4561 case SIOCBONDRELEASE:
4529 case SIOCBONDSETHWADDR: 4562 case SIOCBONDSETHWADDR:
4530 case SIOCBONDCHANGEACTIVE: 4563 case SIOCBONDCHANGEACTIVE:
4531 case SIOCBRADDIF: 4564 case SIOCBRADDIF:
4532 case SIOCBRDELIF: 4565 case SIOCBRDELIF:
4533 case SIOCSHWTSTAMP: 4566 case SIOCSHWTSTAMP:
4534 if (!capable(CAP_NET_ADMIN)) 4567 if (!capable(CAP_NET_ADMIN))
4535 return -EPERM; 4568 return -EPERM;
4536 /* fall through */ 4569 /* fall through */
4537 case SIOCBONDSLAVEINFOQUERY: 4570 case SIOCBONDSLAVEINFOQUERY:
4538 case SIOCBONDINFOQUERY: 4571 case SIOCBONDINFOQUERY:
4572 dev_load(net, ifr.ifr_name);
4573 rtnl_lock();
4574 ret = dev_ifsioc(net, &ifr, cmd);
4575 rtnl_unlock();
4576 return ret;
4577
4578 case SIOCGIFMEM:
4579 /* Get the per device memory space. We can add this but
4580 * currently do not support it */
4581 case SIOCSIFMEM:
4582 /* Set the per device memory buffer space.
4583 * Not applicable in our case */
4584 case SIOCSIFLINK:
4585 return -EINVAL;
4586
4587 /*
4588 * Unknown or private ioctl.
4589 */
4590 default:
4591 if (cmd == SIOCWANDEV ||
4592 (cmd >= SIOCDEVPRIVATE &&
4593 cmd <= SIOCDEVPRIVATE + 15)) {
4539 dev_load(net, ifr.ifr_name); 4594 dev_load(net, ifr.ifr_name);
4540 rtnl_lock(); 4595 rtnl_lock();
4541 ret = dev_ifsioc(net, &ifr, cmd); 4596 ret = dev_ifsioc(net, &ifr, cmd);
4542 rtnl_unlock(); 4597 rtnl_unlock();
4598 if (!ret && copy_to_user(arg, &ifr,
4599 sizeof(struct ifreq)))
4600 ret = -EFAULT;
4543 return ret; 4601 return ret;
4544 4602 }
4545 case SIOCGIFMEM: 4603 /* Take care of Wireless Extensions */
4546 /* Get the per device memory space. We can add this but 4604 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4547 * currently do not support it */ 4605 return wext_handle_ioctl(net, &ifr, cmd, arg);
4548 case SIOCSIFMEM: 4606 return -EINVAL;
4549 /* Set the per device memory buffer space.
4550 * Not applicable in our case */
4551 case SIOCSIFLINK:
4552 return -EINVAL;
4553
4554 /*
4555 * Unknown or private ioctl.
4556 */
4557 default:
4558 if (cmd == SIOCWANDEV ||
4559 (cmd >= SIOCDEVPRIVATE &&
4560 cmd <= SIOCDEVPRIVATE + 15)) {
4561 dev_load(net, ifr.ifr_name);
4562 rtnl_lock();
4563 ret = dev_ifsioc(net, &ifr, cmd);
4564 rtnl_unlock();
4565 if (!ret && copy_to_user(arg, &ifr,
4566 sizeof(struct ifreq)))
4567 ret = -EFAULT;
4568 return ret;
4569 }
4570 /* Take care of Wireless Extensions */
4571 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4572 return wext_handle_ioctl(net, &ifr, cmd, arg);
4573 return -EINVAL;
4574 } 4607 }
4575} 4608}
4576 4609
@@ -4835,6 +4868,7 @@ err_uninit:
4835 dev->netdev_ops->ndo_uninit(dev); 4868 dev->netdev_ops->ndo_uninit(dev);
4836 goto out; 4869 goto out;
4837} 4870}
4871EXPORT_SYMBOL(register_netdevice);
4838 4872
4839/** 4873/**
4840 * init_dummy_netdev - init a dummy network device for NAPI 4874 * init_dummy_netdev - init a dummy network device for NAPI
@@ -5187,6 +5221,7 @@ void free_netdev(struct net_device *dev)
5187 /* will free via device release */ 5221 /* will free via device release */
5188 put_device(&dev->dev); 5222 put_device(&dev->dev);
5189} 5223}
5224EXPORT_SYMBOL(free_netdev);
5190 5225
5191/** 5226/**
5192 * synchronize_net - Synchronize with packet receive processing 5227 * synchronize_net - Synchronize with packet receive processing
@@ -5199,6 +5234,7 @@ void synchronize_net(void)
5199 might_sleep(); 5234 might_sleep();
5200 synchronize_rcu(); 5235 synchronize_rcu();
5201} 5236}
5237EXPORT_SYMBOL(synchronize_net);
5202 5238
5203/** 5239/**
5204 * unregister_netdevice - remove device from the kernel 5240 * unregister_netdevice - remove device from the kernel
@@ -5219,6 +5255,7 @@ void unregister_netdevice(struct net_device *dev)
5219 /* Finish processing unregister after unlock */ 5255 /* Finish processing unregister after unlock */
5220 net_set_todo(dev); 5256 net_set_todo(dev);
5221} 5257}
5258EXPORT_SYMBOL(unregister_netdevice);
5222 5259
5223/** 5260/**
5224 * unregister_netdev - remove device from the kernel 5261 * unregister_netdev - remove device from the kernel
@@ -5237,7 +5274,6 @@ void unregister_netdev(struct net_device *dev)
5237 unregister_netdevice(dev); 5274 unregister_netdevice(dev);
5238 rtnl_unlock(); 5275 rtnl_unlock();
5239} 5276}
5240
5241EXPORT_SYMBOL(unregister_netdev); 5277EXPORT_SYMBOL(unregister_netdev);
5242 5278
5243/** 5279/**
@@ -5427,7 +5463,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5427 unsigned long mask) 5463 unsigned long mask)
5428{ 5464{
5429 /* If device needs checksumming, downgrade to it. */ 5465 /* If device needs checksumming, downgrade to it. */
5430 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) 5466 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5431 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); 5467 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5432 else if (mask & NETIF_F_ALL_CSUM) { 5468 else if (mask & NETIF_F_ALL_CSUM) {
5433 /* If one device supports v4/v6 checksumming, set for all. */ 5469 /* If one device supports v4/v6 checksumming, set for all. */
@@ -5653,41 +5689,3 @@ static int __init initialize_hashrnd(void)
5653 5689
5654late_initcall_sync(initialize_hashrnd); 5690late_initcall_sync(initialize_hashrnd);
5655 5691
5656EXPORT_SYMBOL(__dev_get_by_index);
5657EXPORT_SYMBOL(__dev_get_by_name);
5658EXPORT_SYMBOL(__dev_remove_pack);
5659EXPORT_SYMBOL(dev_valid_name);
5660EXPORT_SYMBOL(dev_add_pack);
5661EXPORT_SYMBOL(dev_alloc_name);
5662EXPORT_SYMBOL(dev_close);
5663EXPORT_SYMBOL(dev_get_by_flags);
5664EXPORT_SYMBOL(dev_get_by_index);
5665EXPORT_SYMBOL(dev_get_by_name);
5666EXPORT_SYMBOL(dev_open);
5667EXPORT_SYMBOL(dev_queue_xmit);
5668EXPORT_SYMBOL(dev_remove_pack);
5669EXPORT_SYMBOL(dev_set_allmulti);
5670EXPORT_SYMBOL(dev_set_promiscuity);
5671EXPORT_SYMBOL(dev_change_flags);
5672EXPORT_SYMBOL(dev_set_mtu);
5673EXPORT_SYMBOL(dev_set_mac_address);
5674EXPORT_SYMBOL(free_netdev);
5675EXPORT_SYMBOL(netdev_boot_setup_check);
5676EXPORT_SYMBOL(netdev_set_master);
5677EXPORT_SYMBOL(netdev_state_change);
5678EXPORT_SYMBOL(netif_receive_skb);
5679EXPORT_SYMBOL(netif_rx);
5680EXPORT_SYMBOL(register_gifconf);
5681EXPORT_SYMBOL(register_netdevice);
5682EXPORT_SYMBOL(register_netdevice_notifier);
5683EXPORT_SYMBOL(skb_checksum_help);
5684EXPORT_SYMBOL(synchronize_net);
5685EXPORT_SYMBOL(unregister_netdevice);
5686EXPORT_SYMBOL(unregister_netdevice_notifier);
5687EXPORT_SYMBOL(net_enable_timestamp);
5688EXPORT_SYMBOL(net_disable_timestamp);
5689EXPORT_SYMBOL(dev_get_flags);
5690
5691EXPORT_SYMBOL(dev_load);
5692
5693EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9d66fa953ab7..0a113f26bc9f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -52,6 +52,7 @@ struct per_cpu_dm_data {
52 52
53struct dm_hw_stat_delta { 53struct dm_hw_stat_delta {
54 struct net_device *dev; 54 struct net_device *dev;
55 unsigned long last_rx;
55 struct list_head list; 56 struct list_head list;
56 struct rcu_head rcu; 57 struct rcu_head rcu;
57 unsigned long last_drop_val; 58 unsigned long last_drop_val;
@@ -180,17 +181,25 @@ static void trace_napi_poll_hit(struct napi_struct *napi)
180 struct dm_hw_stat_delta *new_stat; 181 struct dm_hw_stat_delta *new_stat;
181 182
182 /* 183 /*
183 * Ratelimit our check time to dm_hw_check_delta jiffies 184 * Don't check napi structures with no associated device
184 */ 185 */
185 if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta)) 186 if (!napi->dev)
186 return; 187 return;
187 188
188 rcu_read_lock(); 189 rcu_read_lock();
189 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { 190 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
191 /*
192 * only add a note to our monitor buffer if:
193 * 1) this is the dev we received on
194 * 2) its after the last_rx delta
195 * 3) our rx_dropped count has gone up
196 */
190 if ((new_stat->dev == napi->dev) && 197 if ((new_stat->dev == napi->dev) &&
198 (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
191 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { 199 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
192 trace_drop_common(NULL, NULL); 200 trace_drop_common(NULL, NULL);
193 new_stat->last_drop_val = napi->dev->stats.rx_dropped; 201 new_stat->last_drop_val = napi->dev->stats.rx_dropped;
202 new_stat->last_rx = jiffies;
194 break; 203 break;
195 } 204 }
196 } 205 }
@@ -286,6 +295,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
286 goto out; 295 goto out;
287 296
288 new_stat->dev = dev; 297 new_stat->dev = dev;
298 new_stat->last_rx = jiffies;
289 INIT_RCU_HEAD(&new_stat->rcu); 299 INIT_RCU_HEAD(&new_stat->rcu);
290 spin_lock(&trace_state_lock); 300 spin_lock(&trace_state_lock);
291 list_add_rcu(&new_stat->list, &hw_stats_list); 301 list_add_rcu(&new_stat->list, &hw_stats_list);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 44e571111d3a..4c12ddb5f5ee 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -898,6 +898,19 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
898 return actor(dev, edata.data); 898 return actor(dev, edata.data);
899} 899}
900 900
901static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
902{
903 struct ethtool_flash efl;
904
905 if (copy_from_user(&efl, useraddr, sizeof(efl)))
906 return -EFAULT;
907
908 if (!dev->ethtool_ops->flash_device)
909 return -EOPNOTSUPP;
910
911 return dev->ethtool_ops->flash_device(dev, &efl);
912}
913
901/* The main entry point in this file. Called from net/core/dev.c */ 914/* The main entry point in this file. Called from net/core/dev.c */
902 915
903int dev_ethtool(struct net *net, struct ifreq *ifr) 916int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1111,6 +1124,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1111 case ETHTOOL_SGRO: 1124 case ETHTOOL_SGRO:
1112 rc = ethtool_set_gro(dev, useraddr); 1125 rc = ethtool_set_gro(dev, useraddr);
1113 break; 1126 break;
1127 case ETHTOOL_FLASHDEV:
1128 rc = ethtool_flash_device(dev, useraddr);
1129 break;
1114 default: 1130 default:
1115 rc = -EOPNOTSUPP; 1131 rc = -EOPNOTSUPP;
1116 } 1132 }
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 78e5bfc454ae..493775f4f2f1 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -81,7 +81,7 @@
81struct gen_estimator 81struct gen_estimator
82{ 82{
83 struct list_head list; 83 struct list_head list;
84 struct gnet_stats_basic *bstats; 84 struct gnet_stats_basic_packed *bstats;
85 struct gnet_stats_rate_est *rate_est; 85 struct gnet_stats_rate_est *rate_est;
86 spinlock_t *stats_lock; 86 spinlock_t *stats_lock;
87 int ewma_log; 87 int ewma_log;
@@ -165,7 +165,7 @@ static void gen_add_node(struct gen_estimator *est)
165} 165}
166 166
167static 167static
168struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats, 168struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
169 const struct gnet_stats_rate_est *rate_est) 169 const struct gnet_stats_rate_est *rate_est)
170{ 170{
171 struct rb_node *p = est_root.rb_node; 171 struct rb_node *p = est_root.rb_node;
@@ -202,7 +202,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
202 * 202 *
203 * NOTE: Called under rtnl_mutex 203 * NOTE: Called under rtnl_mutex
204 */ 204 */
205int gen_new_estimator(struct gnet_stats_basic *bstats, 205int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
206 struct gnet_stats_rate_est *rate_est, 206 struct gnet_stats_rate_est *rate_est,
207 spinlock_t *stats_lock, 207 spinlock_t *stats_lock,
208 struct nlattr *opt) 208 struct nlattr *opt)
@@ -262,7 +262,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
262 * 262 *
263 * NOTE: Called under rtnl_mutex 263 * NOTE: Called under rtnl_mutex
264 */ 264 */
265void gen_kill_estimator(struct gnet_stats_basic *bstats, 265void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
266 struct gnet_stats_rate_est *rate_est) 266 struct gnet_stats_rate_est *rate_est)
267{ 267{
268 struct gen_estimator *e; 268 struct gen_estimator *e;
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
292 * 292 *
293 * Returns 0 on success or a negative error code. 293 * Returns 0 on success or a negative error code.
294 */ 294 */
295int gen_replace_estimator(struct gnet_stats_basic *bstats, 295int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
296 struct gnet_stats_rate_est *rate_est, 296 struct gnet_stats_rate_est *rate_est,
297 spinlock_t *stats_lock, struct nlattr *opt) 297 spinlock_t *stats_lock, struct nlattr *opt)
298{ 298{
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(gen_replace_estimator);
308 * 308 *
309 * Returns true if estimator is active, and false if not. 309 * Returns true if estimator is active, and false if not.
310 */ 310 */
311bool gen_estimator_active(const struct gnet_stats_basic *bstats, 311bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
312 const struct gnet_stats_rate_est *rate_est) 312 const struct gnet_stats_rate_est *rate_est)
313{ 313{
314 ASSERT_RTNL(); 314 ASSERT_RTNL();
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index c3d0ffeac243..8569310268ab 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -106,16 +106,21 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
106 * if the room in the socket buffer was not sufficient. 106 * if the room in the socket buffer was not sufficient.
107 */ 107 */
108int 108int
109gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b) 109gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
110{ 110{
111 if (d->compat_tc_stats) { 111 if (d->compat_tc_stats) {
112 d->tc_stats.bytes = b->bytes; 112 d->tc_stats.bytes = b->bytes;
113 d->tc_stats.packets = b->packets; 113 d->tc_stats.packets = b->packets;
114 } 114 }
115 115
116 if (d->tail) 116 if (d->tail) {
117 return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b)); 117 struct gnet_stats_basic sb;
118 118
119 memset(&sb, 0, sizeof(sb));
120 sb.bytes = b->bytes;
121 sb.packets = b->packets;
122 return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
123 }
119 return 0; 124 return 0;
120} 125}
121 126
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ddd2cd2b1775..1c1af2756f38 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -517,7 +517,7 @@ int net_assign_generic(struct net *net, int id, void *data)
517 */ 517 */
518 518
519 ng->len = id; 519 ng->len = id;
520 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len); 520 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
521 521
522 rcu_assign_pointer(net->gen, ng); 522 rcu_assign_pointer(net->gen, ng);
523 call_rcu(&old_ng->rcu, net_generic_release); 523 call_rcu(&old_ng->rcu, net_generic_release);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0ac309154b0d..0b4d0d35ef40 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -323,6 +323,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
323 323
324 udelay(USEC_PER_POLL); 324 udelay(USEC_PER_POLL);
325 } 325 }
326
327 WARN_ONCE(!irqs_disabled(),
328 "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
329 dev->name, ops->ndo_start_xmit);
330
326 local_irq_restore(flags); 331 local_irq_restore(flags);
327 } 332 }
328 333
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 19b8c20e98a4..0bcecbf06581 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -131,6 +131,7 @@
131#include <linux/ioport.h> 131#include <linux/ioport.h>
132#include <linux/interrupt.h> 132#include <linux/interrupt.h>
133#include <linux/capability.h> 133#include <linux/capability.h>
134#include <linux/hrtimer.h>
134#include <linux/freezer.h> 135#include <linux/freezer.h>
135#include <linux/delay.h> 136#include <linux/delay.h>
136#include <linux/timer.h> 137#include <linux/timer.h>
@@ -162,14 +163,13 @@
162#include <asm/byteorder.h> 163#include <asm/byteorder.h>
163#include <linux/rcupdate.h> 164#include <linux/rcupdate.h>
164#include <linux/bitops.h> 165#include <linux/bitops.h>
165#include <asm/io.h> 166#include <linux/io.h>
167#include <linux/timex.h>
168#include <linux/uaccess.h>
166#include <asm/dma.h> 169#include <asm/dma.h>
167#include <asm/uaccess.h>
168#include <asm/div64.h> /* do_div */ 170#include <asm/div64.h> /* do_div */
169#include <asm/timex.h>
170
171#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
172 171
172#define VERSION "2.72"
173#define IP_NAME_SZ 32 173#define IP_NAME_SZ 32
174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
175#define MPLS_STACK_BOTTOM htonl(0x00000100) 175#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -206,7 +206,7 @@
206#define PKTGEN_MAGIC 0xbe9be955 206#define PKTGEN_MAGIC 0xbe9be955
207#define PG_PROC_DIR "pktgen" 207#define PG_PROC_DIR "pktgen"
208#define PGCTRL "pgctrl" 208#define PGCTRL "pgctrl"
209static struct proc_dir_entry *pg_proc_dir = NULL; 209static struct proc_dir_entry *pg_proc_dir;
210 210
211#define MAX_CFLOWS 65536 211#define MAX_CFLOWS 65536
212 212
@@ -231,9 +231,9 @@ struct pktgen_dev {
231 */ 231 */
232 struct proc_dir_entry *entry; /* proc file */ 232 struct proc_dir_entry *entry; /* proc file */
233 struct pktgen_thread *pg_thread;/* the owner */ 233 struct pktgen_thread *pg_thread;/* the owner */
234 struct list_head list; /* Used for chaining in the thread's run-queue */ 234 struct list_head list; /* chaining in the thread's run-queue */
235 235
236 int running; /* if this changes to false, the test will stop */ 236 int running; /* if false, the test will stop */
237 237
238 /* If min != max, then we will either do a linear iteration, or 238 /* If min != max, then we will either do a linear iteration, or
239 * we will do a random selection from within the range. 239 * we will do a random selection from within the range.
@@ -246,33 +246,37 @@ struct pktgen_dev {
246 int max_pkt_size; /* = ETH_ZLEN; */ 246 int max_pkt_size; /* = ETH_ZLEN; */
247 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ 247 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
248 int nfrags; 248 int nfrags;
249 __u32 delay_us; /* Default delay */ 249 u64 delay; /* nano-seconds */
250 __u32 delay_ns; 250
251 __u64 count; /* Default No packets to send */ 251 __u64 count; /* Default No packets to send */
252 __u64 sofar; /* How many pkts we've sent so far */ 252 __u64 sofar; /* How many pkts we've sent so far */
253 __u64 tx_bytes; /* How many bytes we've transmitted */ 253 __u64 tx_bytes; /* How many bytes we've transmitted */
254 __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */ 254 __u64 errors; /* Errors when trying to transmit,
255 pkts will be re-sent */
255 256
256 /* runtime counters relating to clone_skb */ 257 /* runtime counters relating to clone_skb */
257 __u64 next_tx_us; /* timestamp of when to tx next */
258 __u32 next_tx_ns;
259 258
260 __u64 allocated_skbs; 259 __u64 allocated_skbs;
261 __u32 clone_count; 260 __u32 clone_count;
262 int last_ok; /* Was last skb sent? 261 int last_ok; /* Was last skb sent?
263 * Or a failed transmit of some sort? This will keep 262 * Or a failed transmit of some sort?
264 * sequence numbers in order, for example. 263 * This will keep sequence numbers in order
265 */ 264 */
266 __u64 started_at; /* micro-seconds */ 265 ktime_t next_tx;
267 __u64 stopped_at; /* micro-seconds */ 266 ktime_t started_at;
268 __u64 idle_acc; /* micro-seconds */ 267 ktime_t stopped_at;
268 u64 idle_acc; /* nano-seconds */
269
269 __u32 seq_num; 270 __u32 seq_num;
270 271
271 int clone_skb; /* Use multiple SKBs during packet gen. If this number 272 int clone_skb; /*
272 * is greater than 1, then that many copies of the same 273 * Use multiple SKBs during packet gen.
273 * packet will be sent before a new packet is allocated. 274 * If this number is greater than 1, then
274 * For instance, if you want to send 1024 identical packets 275 * that many copies of the same packet will be
275 * before creating a new packet, set clone_skb to 1024. 276 * sent before a new packet is allocated.
277 * If you want to send 1024 identical packets
278 * before creating a new packet,
279 * set clone_skb to 1024.
276 */ 280 */
277 281
278 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 282 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
@@ -304,8 +308,10 @@ struct pktgen_dev {
304 __u16 udp_dst_max; /* exclusive, dest UDP port */ 308 __u16 udp_dst_max; /* exclusive, dest UDP port */
305 309
306 /* DSCP + ECN */ 310 /* DSCP + ECN */
307 __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */ 311 __u8 tos; /* six MSB of (former) IPv4 TOS
308 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */ 312 are for dscp codepoint */
313 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6
314 (see RFC 3260, sec. 4) */
309 315
310 /* MPLS */ 316 /* MPLS */
311 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ 317 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */
@@ -346,15 +352,17 @@ struct pktgen_dev {
346 */ 352 */
347 __u16 pad; /* pad out the hh struct to an even 16 bytes */ 353 __u16 pad; /* pad out the hh struct to an even 16 bytes */
348 354
349 struct sk_buff *skb; /* skb we are to transmit next, mainly used for when we 355 struct sk_buff *skb; /* skb we are to transmit next, used for when we
350 * are transmitting the same one multiple times 356 * are transmitting the same one multiple times
351 */ 357 */
352 struct net_device *odev; /* The out-going device. Note that the device should 358 struct net_device *odev; /* The out-going device.
353 * have it's pg_info pointer pointing back to this 359 * Note that the device should have it's
354 * device. This will be set when the user specifies 360 * pg_info pointer pointing back to this
355 * the out-going device name (not when the inject is 361 * device.
356 * started as it used to do.) 362 * Set when the user specifies the out-going
357 */ 363 * device name (not when the inject is
364 * started as it used to do.)
365 */
358 struct flow_state *flows; 366 struct flow_state *flows;
359 unsigned cflows; /* Concurrent flows (config) */ 367 unsigned cflows; /* Concurrent flows (config) */
360 unsigned lflow; /* Flow length (config) */ 368 unsigned lflow; /* Flow length (config) */
@@ -379,13 +387,14 @@ struct pktgen_hdr {
379}; 387};
380 388
381struct pktgen_thread { 389struct pktgen_thread {
382 spinlock_t if_lock; 390 spinlock_t if_lock; /* for list of devices */
383 struct list_head if_list; /* All device here */ 391 struct list_head if_list; /* All device here */
384 struct list_head th_list; 392 struct list_head th_list;
385 struct task_struct *tsk; 393 struct task_struct *tsk;
386 char result[512]; 394 char result[512];
387 395
388 /* Field for thread to receive "posted" events terminate, stop ifs etc. */ 396 /* Field for thread to receive "posted" events terminate,
397 stop ifs etc. */
389 398
390 u32 control; 399 u32 control;
391 int cpu; 400 int cpu;
@@ -397,24 +406,22 @@ struct pktgen_thread {
397#define REMOVE 1 406#define REMOVE 1
398#define FIND 0 407#define FIND 0
399 408
400/** Convert to micro-seconds */ 409static inline ktime_t ktime_now(void)
401static inline __u64 tv_to_us(const struct timeval *tv)
402{ 410{
403 __u64 us = tv->tv_usec; 411 struct timespec ts;
404 us += (__u64) tv->tv_sec * (__u64) 1000000; 412 ktime_get_ts(&ts);
405 return us; 413
414 return timespec_to_ktime(ts);
406} 415}
407 416
408static __u64 getCurUs(void) 417/* This works even if 32 bit because of careful byte order choice */
418static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
409{ 419{
410 struct timeval tv; 420 return cmp1.tv64 < cmp2.tv64;
411 do_gettimeofday(&tv);
412 return tv_to_us(&tv);
413} 421}
414 422
415/* old include end */ 423static const char version[] =
416 424 "pktgen " VERSION ": Packet Generator for packet performance testing.\n";
417static char version[] __initdata = VERSION;
418 425
419static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 426static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
420static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 427static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
@@ -424,7 +431,7 @@ static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
424static void pktgen_run_all_threads(void); 431static void pktgen_run_all_threads(void);
425static void pktgen_reset_all_threads(void); 432static void pktgen_reset_all_threads(void);
426static void pktgen_stop_all_threads_ifs(void); 433static void pktgen_stop_all_threads_ifs(void);
427static int pktgen_stop_device(struct pktgen_dev *pkt_dev); 434
428static void pktgen_stop(struct pktgen_thread *t); 435static void pktgen_stop(struct pktgen_thread *t);
429static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 436static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
430 437
@@ -432,10 +439,10 @@ static unsigned int scan_ip6(const char *s, char ip[16]);
432static unsigned int fmt_ip6(char *s, const char ip[16]); 439static unsigned int fmt_ip6(char *s, const char ip[16]);
433 440
434/* Module parameters, defaults. */ 441/* Module parameters, defaults. */
435static int pg_count_d = 1000; /* 1000 pkts by default */ 442static int pg_count_d __read_mostly = 1000;
436static int pg_delay_d; 443static int pg_delay_d __read_mostly;
437static int pg_clone_skb_d; 444static int pg_clone_skb_d __read_mostly;
438static int debug; 445static int debug __read_mostly;
439 446
440static DEFINE_MUTEX(pktgen_thread_lock); 447static DEFINE_MUTEX(pktgen_thread_lock);
441static LIST_HEAD(pktgen_threads); 448static LIST_HEAD(pktgen_threads);
@@ -451,12 +458,12 @@ static struct notifier_block pktgen_notifier_block = {
451 458
452static int pgctrl_show(struct seq_file *seq, void *v) 459static int pgctrl_show(struct seq_file *seq, void *v)
453{ 460{
454 seq_puts(seq, VERSION); 461 seq_puts(seq, version);
455 return 0; 462 return 0;
456} 463}
457 464
458static ssize_t pgctrl_write(struct file *file, const char __user * buf, 465static ssize_t pgctrl_write(struct file *file, const char __user *buf,
459 size_t count, loff_t * ppos) 466 size_t count, loff_t *ppos)
460{ 467{
461 int err = 0; 468 int err = 0;
462 char data[128]; 469 char data[128];
@@ -509,10 +516,9 @@ static const struct file_operations pktgen_fops = {
509 516
510static int pktgen_if_show(struct seq_file *seq, void *v) 517static int pktgen_if_show(struct seq_file *seq, void *v)
511{ 518{
512 struct pktgen_dev *pkt_dev = seq->private; 519 const struct pktgen_dev *pkt_dev = seq->private;
513 __u64 sa; 520 ktime_t stopped;
514 __u64 stopped; 521 u64 idle;
515 __u64 now = getCurUs();
516 522
517 seq_printf(seq, 523 seq_printf(seq,
518 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 524 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
@@ -520,9 +526,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
520 pkt_dev->max_pkt_size); 526 pkt_dev->max_pkt_size);
521 527
522 seq_printf(seq, 528 seq_printf(seq,
523 " frags: %d delay: %u clone_skb: %d ifname: %s\n", 529 " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
524 pkt_dev->nfrags, 530 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
525 1000 * pkt_dev->delay_us + pkt_dev->delay_ns,
526 pkt_dev->clone_skb, pkt_dev->odev->name); 531 pkt_dev->clone_skb, pkt_dev->odev->name);
527 532
528 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 533 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
@@ -549,11 +554,14 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
549 " daddr: %s min_daddr: %s max_daddr: %s\n", b1, 554 " daddr: %s min_daddr: %s max_daddr: %s\n", b1,
550 b2, b3); 555 b2, b3);
551 556
552 } else 557 } else {
558 seq_printf(seq,
559 " dst_min: %s dst_max: %s\n",
560 pkt_dev->dst_min, pkt_dev->dst_max);
553 seq_printf(seq, 561 seq_printf(seq,
554 " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 562 " src_min: %s src_max: %s\n",
555 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, 563 pkt_dev->src_min, pkt_dev->src_max);
556 pkt_dev->src_max); 564 }
557 565
558 seq_puts(seq, " src_mac: "); 566 seq_puts(seq, " src_mac: ");
559 567
@@ -565,7 +573,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
565 seq_printf(seq, "%pM\n", pkt_dev->dst_mac); 573 seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
566 574
567 seq_printf(seq, 575 seq_printf(seq,
568 " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 576 " udp_src_min: %d udp_src_max: %d"
577 " udp_dst_min: %d udp_dst_max: %d\n",
569 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 578 pkt_dev->udp_src_min, pkt_dev->udp_src_max,
570 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 579 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);
571 580
@@ -581,23 +590,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
581 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 590 i == pkt_dev->nr_labels-1 ? "\n" : ", ");
582 } 591 }
583 592
584 if (pkt_dev->vlan_id != 0xffff) { 593 if (pkt_dev->vlan_id != 0xffff)
585 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", 594 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n",
586 pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); 595 pkt_dev->vlan_id, pkt_dev->vlan_p,
587 } 596 pkt_dev->vlan_cfi);
588 597
589 if (pkt_dev->svlan_id != 0xffff) { 598 if (pkt_dev->svlan_id != 0xffff)
590 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", 599 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n",
591 pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); 600 pkt_dev->svlan_id, pkt_dev->svlan_p,
592 } 601 pkt_dev->svlan_cfi);
593 602
594 if (pkt_dev->tos) { 603 if (pkt_dev->tos)
595 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); 604 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos);
596 }
597 605
598 if (pkt_dev->traffic_class) { 606 if (pkt_dev->traffic_class)
599 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 607 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
600 }
601 608
602 seq_printf(seq, " Flags: "); 609 seq_printf(seq, " Flags: ");
603 610
@@ -654,17 +661,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
654 661
655 seq_puts(seq, "\n"); 662 seq_puts(seq, "\n");
656 663
657 sa = pkt_dev->started_at; 664 /* not really stopped, more like last-running-at */
658 stopped = pkt_dev->stopped_at; 665 stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
659 if (pkt_dev->running) 666 idle = pkt_dev->idle_acc;
660 stopped = now; /* not really stopped, more like last-running-at */ 667 do_div(idle, NSEC_PER_USEC);
661 668
662 seq_printf(seq, 669 seq_printf(seq,
663 "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 670 "Current:\n pkts-sofar: %llu errors: %llu\n",
664 (unsigned long long)pkt_dev->sofar, 671 (unsigned long long)pkt_dev->sofar,
665 (unsigned long long)pkt_dev->errors, (unsigned long long)sa, 672 (unsigned long long)pkt_dev->errors);
666 (unsigned long long)stopped, 673
667 (unsigned long long)pkt_dev->idle_acc); 674 seq_printf(seq,
675 " started: %lluus stopped: %lluus idle: %lluus\n",
676 (unsigned long long) ktime_to_us(pkt_dev->started_at),
677 (unsigned long long) ktime_to_us(stopped),
678 (unsigned long long) idle);
668 679
669 seq_printf(seq, 680 seq_printf(seq,
670 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 681 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
@@ -696,7 +707,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
696} 707}
697 708
698 709
699static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) 710static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
711 __u32 *num)
700{ 712{
701 int i = 0; 713 int i = 0;
702 *num = 0; 714 *num = 0;
@@ -846,9 +858,9 @@ static ssize_t pktgen_if_write(struct file *file,
846 /* Read variable name */ 858 /* Read variable name */
847 859
848 len = strn_len(&user_buffer[i], sizeof(name) - 1); 860 len = strn_len(&user_buffer[i], sizeof(name) - 1);
849 if (len < 0) { 861 if (len < 0)
850 return len; 862 return len;
851 } 863
852 memset(name, 0, sizeof(name)); 864 memset(name, 0, sizeof(name));
853 if (copy_from_user(name, &user_buffer[i], len)) 865 if (copy_from_user(name, &user_buffer[i], len))
854 return -EFAULT; 866 return -EFAULT;
@@ -872,9 +884,9 @@ static ssize_t pktgen_if_write(struct file *file,
872 884
873 if (!strcmp(name, "min_pkt_size")) { 885 if (!strcmp(name, "min_pkt_size")) {
874 len = num_arg(&user_buffer[i], 10, &value); 886 len = num_arg(&user_buffer[i], 10, &value);
875 if (len < 0) { 887 if (len < 0)
876 return len; 888 return len;
877 } 889
878 i += len; 890 i += len;
879 if (value < 14 + 20 + 8) 891 if (value < 14 + 20 + 8)
880 value = 14 + 20 + 8; 892 value = 14 + 20 + 8;
@@ -889,9 +901,9 @@ static ssize_t pktgen_if_write(struct file *file,
889 901
890 if (!strcmp(name, "max_pkt_size")) { 902 if (!strcmp(name, "max_pkt_size")) {
891 len = num_arg(&user_buffer[i], 10, &value); 903 len = num_arg(&user_buffer[i], 10, &value);
892 if (len < 0) { 904 if (len < 0)
893 return len; 905 return len;
894 } 906
895 i += len; 907 i += len;
896 if (value < 14 + 20 + 8) 908 if (value < 14 + 20 + 8)
897 value = 14 + 20 + 8; 909 value = 14 + 20 + 8;
@@ -908,9 +920,9 @@ static ssize_t pktgen_if_write(struct file *file,
908 920
909 if (!strcmp(name, "pkt_size")) { 921 if (!strcmp(name, "pkt_size")) {
910 len = num_arg(&user_buffer[i], 10, &value); 922 len = num_arg(&user_buffer[i], 10, &value);
911 if (len < 0) { 923 if (len < 0)
912 return len; 924 return len;
913 } 925
914 i += len; 926 i += len;
915 if (value < 14 + 20 + 8) 927 if (value < 14 + 20 + 8)
916 value = 14 + 20 + 8; 928 value = 14 + 20 + 8;
@@ -925,9 +937,9 @@ static ssize_t pktgen_if_write(struct file *file,
925 937
926 if (!strcmp(name, "debug")) { 938 if (!strcmp(name, "debug")) {
927 len = num_arg(&user_buffer[i], 10, &value); 939 len = num_arg(&user_buffer[i], 10, &value);
928 if (len < 0) { 940 if (len < 0)
929 return len; 941 return len;
930 } 942
931 i += len; 943 i += len;
932 debug = value; 944 debug = value;
933 sprintf(pg_result, "OK: debug=%u", debug); 945 sprintf(pg_result, "OK: debug=%u", debug);
@@ -936,9 +948,9 @@ static ssize_t pktgen_if_write(struct file *file,
936 948
937 if (!strcmp(name, "frags")) { 949 if (!strcmp(name, "frags")) {
938 len = num_arg(&user_buffer[i], 10, &value); 950 len = num_arg(&user_buffer[i], 10, &value);
939 if (len < 0) { 951 if (len < 0)
940 return len; 952 return len;
941 } 953
942 i += len; 954 i += len;
943 pkt_dev->nfrags = value; 955 pkt_dev->nfrags = value;
944 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 956 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
@@ -946,26 +958,24 @@ static ssize_t pktgen_if_write(struct file *file,
946 } 958 }
947 if (!strcmp(name, "delay")) { 959 if (!strcmp(name, "delay")) {
948 len = num_arg(&user_buffer[i], 10, &value); 960 len = num_arg(&user_buffer[i], 10, &value);
949 if (len < 0) { 961 if (len < 0)
950 return len; 962 return len;
951 } 963
952 i += len; 964 i += len;
953 if (value == 0x7FFFFFFF) { 965 if (value == 0x7FFFFFFF)
954 pkt_dev->delay_us = 0x7FFFFFFF; 966 pkt_dev->delay = ULLONG_MAX;
955 pkt_dev->delay_ns = 0; 967 else
956 } else { 968 pkt_dev->delay = (u64)value * NSEC_PER_USEC;
957 pkt_dev->delay_us = value / 1000; 969
958 pkt_dev->delay_ns = value % 1000; 970 sprintf(pg_result, "OK: delay=%llu",
959 } 971 (unsigned long long) pkt_dev->delay);
960 sprintf(pg_result, "OK: delay=%u",
961 1000 * pkt_dev->delay_us + pkt_dev->delay_ns);
962 return count; 972 return count;
963 } 973 }
964 if (!strcmp(name, "udp_src_min")) { 974 if (!strcmp(name, "udp_src_min")) {
965 len = num_arg(&user_buffer[i], 10, &value); 975 len = num_arg(&user_buffer[i], 10, &value);
966 if (len < 0) { 976 if (len < 0)
967 return len; 977 return len;
968 } 978
969 i += len; 979 i += len;
970 if (value != pkt_dev->udp_src_min) { 980 if (value != pkt_dev->udp_src_min) {
971 pkt_dev->udp_src_min = value; 981 pkt_dev->udp_src_min = value;
@@ -976,9 +986,9 @@ static ssize_t pktgen_if_write(struct file *file,
976 } 986 }
977 if (!strcmp(name, "udp_dst_min")) { 987 if (!strcmp(name, "udp_dst_min")) {
978 len = num_arg(&user_buffer[i], 10, &value); 988 len = num_arg(&user_buffer[i], 10, &value);
979 if (len < 0) { 989 if (len < 0)
980 return len; 990 return len;
981 } 991
982 i += len; 992 i += len;
983 if (value != pkt_dev->udp_dst_min) { 993 if (value != pkt_dev->udp_dst_min) {
984 pkt_dev->udp_dst_min = value; 994 pkt_dev->udp_dst_min = value;
@@ -989,9 +999,9 @@ static ssize_t pktgen_if_write(struct file *file,
989 } 999 }
990 if (!strcmp(name, "udp_src_max")) { 1000 if (!strcmp(name, "udp_src_max")) {
991 len = num_arg(&user_buffer[i], 10, &value); 1001 len = num_arg(&user_buffer[i], 10, &value);
992 if (len < 0) { 1002 if (len < 0)
993 return len; 1003 return len;
994 } 1004
995 i += len; 1005 i += len;
996 if (value != pkt_dev->udp_src_max) { 1006 if (value != pkt_dev->udp_src_max) {
997 pkt_dev->udp_src_max = value; 1007 pkt_dev->udp_src_max = value;
@@ -1002,9 +1012,9 @@ static ssize_t pktgen_if_write(struct file *file,
1002 } 1012 }
1003 if (!strcmp(name, "udp_dst_max")) { 1013 if (!strcmp(name, "udp_dst_max")) {
1004 len = num_arg(&user_buffer[i], 10, &value); 1014 len = num_arg(&user_buffer[i], 10, &value);
1005 if (len < 0) { 1015 if (len < 0)
1006 return len; 1016 return len;
1007 } 1017
1008 i += len; 1018 i += len;
1009 if (value != pkt_dev->udp_dst_max) { 1019 if (value != pkt_dev->udp_dst_max) {
1010 pkt_dev->udp_dst_max = value; 1020 pkt_dev->udp_dst_max = value;
@@ -1015,9 +1025,9 @@ static ssize_t pktgen_if_write(struct file *file,
1015 } 1025 }
1016 if (!strcmp(name, "clone_skb")) { 1026 if (!strcmp(name, "clone_skb")) {
1017 len = num_arg(&user_buffer[i], 10, &value); 1027 len = num_arg(&user_buffer[i], 10, &value);
1018 if (len < 0) { 1028 if (len < 0)
1019 return len; 1029 return len;
1020 } 1030
1021 i += len; 1031 i += len;
1022 pkt_dev->clone_skb = value; 1032 pkt_dev->clone_skb = value;
1023 1033
@@ -1026,9 +1036,9 @@ static ssize_t pktgen_if_write(struct file *file,
1026 } 1036 }
1027 if (!strcmp(name, "count")) { 1037 if (!strcmp(name, "count")) {
1028 len = num_arg(&user_buffer[i], 10, &value); 1038 len = num_arg(&user_buffer[i], 10, &value);
1029 if (len < 0) { 1039 if (len < 0)
1030 return len; 1040 return len;
1031 } 1041
1032 i += len; 1042 i += len;
1033 pkt_dev->count = value; 1043 pkt_dev->count = value;
1034 sprintf(pg_result, "OK: count=%llu", 1044 sprintf(pg_result, "OK: count=%llu",
@@ -1037,9 +1047,9 @@ static ssize_t pktgen_if_write(struct file *file,
1037 } 1047 }
1038 if (!strcmp(name, "src_mac_count")) { 1048 if (!strcmp(name, "src_mac_count")) {
1039 len = num_arg(&user_buffer[i], 10, &value); 1049 len = num_arg(&user_buffer[i], 10, &value);
1040 if (len < 0) { 1050 if (len < 0)
1041 return len; 1051 return len;
1042 } 1052
1043 i += len; 1053 i += len;
1044 if (pkt_dev->src_mac_count != value) { 1054 if (pkt_dev->src_mac_count != value) {
1045 pkt_dev->src_mac_count = value; 1055 pkt_dev->src_mac_count = value;
@@ -1051,9 +1061,9 @@ static ssize_t pktgen_if_write(struct file *file,
1051 } 1061 }
1052 if (!strcmp(name, "dst_mac_count")) { 1062 if (!strcmp(name, "dst_mac_count")) {
1053 len = num_arg(&user_buffer[i], 10, &value); 1063 len = num_arg(&user_buffer[i], 10, &value);
1054 if (len < 0) { 1064 if (len < 0)
1055 return len; 1065 return len;
1056 } 1066
1057 i += len; 1067 i += len;
1058 if (pkt_dev->dst_mac_count != value) { 1068 if (pkt_dev->dst_mac_count != value) {
1059 pkt_dev->dst_mac_count = value; 1069 pkt_dev->dst_mac_count = value;
@@ -1067,9 +1077,9 @@ static ssize_t pktgen_if_write(struct file *file,
1067 char f[32]; 1077 char f[32];
1068 memset(f, 0, 32); 1078 memset(f, 0, 32);
1069 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1079 len = strn_len(&user_buffer[i], sizeof(f) - 1);
1070 if (len < 0) { 1080 if (len < 0)
1071 return len; 1081 return len;
1072 } 1082
1073 if (copy_from_user(f, &user_buffer[i], len)) 1083 if (copy_from_user(f, &user_buffer[i], len))
1074 return -EFAULT; 1084 return -EFAULT;
1075 i += len; 1085 i += len;
@@ -1168,9 +1178,8 @@ static ssize_t pktgen_if_write(struct file *file,
1168 } 1178 }
1169 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1179 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
1170 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1180 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
1171 if (len < 0) { 1181 if (len < 0)
1172 return len; 1182 return len;
1173 }
1174 1183
1175 if (copy_from_user(buf, &user_buffer[i], len)) 1184 if (copy_from_user(buf, &user_buffer[i], len))
1176 return -EFAULT; 1185 return -EFAULT;
@@ -1190,9 +1199,9 @@ static ssize_t pktgen_if_write(struct file *file,
1190 } 1199 }
1191 if (!strcmp(name, "dst_max")) { 1200 if (!strcmp(name, "dst_max")) {
1192 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1201 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
1193 if (len < 0) { 1202 if (len < 0)
1194 return len; 1203 return len;
1195 } 1204
1196 1205
1197 if (copy_from_user(buf, &user_buffer[i], len)) 1206 if (copy_from_user(buf, &user_buffer[i], len))
1198 return -EFAULT; 1207 return -EFAULT;
@@ -1303,9 +1312,9 @@ static ssize_t pktgen_if_write(struct file *file,
1303 } 1312 }
1304 if (!strcmp(name, "src_min")) { 1313 if (!strcmp(name, "src_min")) {
1305 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1314 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
1306 if (len < 0) { 1315 if (len < 0)
1307 return len; 1316 return len;
1308 } 1317
1309 if (copy_from_user(buf, &user_buffer[i], len)) 1318 if (copy_from_user(buf, &user_buffer[i], len))
1310 return -EFAULT; 1319 return -EFAULT;
1311 buf[len] = 0; 1320 buf[len] = 0;
@@ -1324,9 +1333,9 @@ static ssize_t pktgen_if_write(struct file *file,
1324 } 1333 }
1325 if (!strcmp(name, "src_max")) { 1334 if (!strcmp(name, "src_max")) {
1326 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1335 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
1327 if (len < 0) { 1336 if (len < 0)
1328 return len; 1337 return len;
1329 } 1338
1330 if (copy_from_user(buf, &user_buffer[i], len)) 1339 if (copy_from_user(buf, &user_buffer[i], len))
1331 return -EFAULT; 1340 return -EFAULT;
1332 buf[len] = 0; 1341 buf[len] = 0;
@@ -1350,9 +1359,9 @@ static ssize_t pktgen_if_write(struct file *file,
1350 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); 1359 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1351 1360
1352 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1361 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1353 if (len < 0) { 1362 if (len < 0)
1354 return len; 1363 return len;
1355 } 1364
1356 memset(valstr, 0, sizeof(valstr)); 1365 memset(valstr, 0, sizeof(valstr));
1357 if (copy_from_user(valstr, &user_buffer[i], len)) 1366 if (copy_from_user(valstr, &user_buffer[i], len))
1358 return -EFAULT; 1367 return -EFAULT;
@@ -1392,9 +1401,9 @@ static ssize_t pktgen_if_write(struct file *file,
1392 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); 1401 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
1393 1402
1394 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1403 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1395 if (len < 0) { 1404 if (len < 0)
1396 return len; 1405 return len;
1397 } 1406
1398 memset(valstr, 0, sizeof(valstr)); 1407 memset(valstr, 0, sizeof(valstr));
1399 if (copy_from_user(valstr, &user_buffer[i], len)) 1408 if (copy_from_user(valstr, &user_buffer[i], len))
1400 return -EFAULT; 1409 return -EFAULT;
@@ -1435,9 +1444,9 @@ static ssize_t pktgen_if_write(struct file *file,
1435 1444
1436 if (!strcmp(name, "flows")) { 1445 if (!strcmp(name, "flows")) {
1437 len = num_arg(&user_buffer[i], 10, &value); 1446 len = num_arg(&user_buffer[i], 10, &value);
1438 if (len < 0) { 1447 if (len < 0)
1439 return len; 1448 return len;
1440 } 1449
1441 i += len; 1450 i += len;
1442 if (value > MAX_CFLOWS) 1451 if (value > MAX_CFLOWS)
1443 value = MAX_CFLOWS; 1452 value = MAX_CFLOWS;
@@ -1449,9 +1458,9 @@ static ssize_t pktgen_if_write(struct file *file,
1449 1458
1450 if (!strcmp(name, "flowlen")) { 1459 if (!strcmp(name, "flowlen")) {
1451 len = num_arg(&user_buffer[i], 10, &value); 1460 len = num_arg(&user_buffer[i], 10, &value);
1452 if (len < 0) { 1461 if (len < 0)
1453 return len; 1462 return len;
1454 } 1463
1455 i += len; 1464 i += len;
1456 pkt_dev->lflow = value; 1465 pkt_dev->lflow = value;
1457 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1466 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
@@ -1460,9 +1469,9 @@ static ssize_t pktgen_if_write(struct file *file,
1460 1469
1461 if (!strcmp(name, "queue_map_min")) { 1470 if (!strcmp(name, "queue_map_min")) {
1462 len = num_arg(&user_buffer[i], 5, &value); 1471 len = num_arg(&user_buffer[i], 5, &value);
1463 if (len < 0) { 1472 if (len < 0)
1464 return len; 1473 return len;
1465 } 1474
1466 i += len; 1475 i += len;
1467 pkt_dev->queue_map_min = value; 1476 pkt_dev->queue_map_min = value;
1468 sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); 1477 sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
@@ -1471,9 +1480,9 @@ static ssize_t pktgen_if_write(struct file *file,
1471 1480
1472 if (!strcmp(name, "queue_map_max")) { 1481 if (!strcmp(name, "queue_map_max")) {
1473 len = num_arg(&user_buffer[i], 5, &value); 1482 len = num_arg(&user_buffer[i], 5, &value);
1474 if (len < 0) { 1483 if (len < 0)
1475 return len; 1484 return len;
1476 } 1485
1477 i += len; 1486 i += len;
1478 pkt_dev->queue_map_max = value; 1487 pkt_dev->queue_map_max = value;
1479 sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); 1488 sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
@@ -1505,9 +1514,9 @@ static ssize_t pktgen_if_write(struct file *file,
1505 1514
1506 if (!strcmp(name, "vlan_id")) { 1515 if (!strcmp(name, "vlan_id")) {
1507 len = num_arg(&user_buffer[i], 4, &value); 1516 len = num_arg(&user_buffer[i], 4, &value);
1508 if (len < 0) { 1517 if (len < 0)
1509 return len; 1518 return len;
1510 } 1519
1511 i += len; 1520 i += len;
1512 if (value <= 4095) { 1521 if (value <= 4095) {
1513 pkt_dev->vlan_id = value; /* turn on VLAN */ 1522 pkt_dev->vlan_id = value; /* turn on VLAN */
@@ -1532,9 +1541,9 @@ static ssize_t pktgen_if_write(struct file *file,
1532 1541
1533 if (!strcmp(name, "vlan_p")) { 1542 if (!strcmp(name, "vlan_p")) {
1534 len = num_arg(&user_buffer[i], 1, &value); 1543 len = num_arg(&user_buffer[i], 1, &value);
1535 if (len < 0) { 1544 if (len < 0)
1536 return len; 1545 return len;
1537 } 1546
1538 i += len; 1547 i += len;
1539 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { 1548 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
1540 pkt_dev->vlan_p = value; 1549 pkt_dev->vlan_p = value;
@@ -1547,9 +1556,9 @@ static ssize_t pktgen_if_write(struct file *file,
1547 1556
1548 if (!strcmp(name, "vlan_cfi")) { 1557 if (!strcmp(name, "vlan_cfi")) {
1549 len = num_arg(&user_buffer[i], 1, &value); 1558 len = num_arg(&user_buffer[i], 1, &value);
1550 if (len < 0) { 1559 if (len < 0)
1551 return len; 1560 return len;
1552 } 1561
1553 i += len; 1562 i += len;
1554 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { 1563 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
1555 pkt_dev->vlan_cfi = value; 1564 pkt_dev->vlan_cfi = value;
@@ -1562,9 +1571,9 @@ static ssize_t pktgen_if_write(struct file *file,
1562 1571
1563 if (!strcmp(name, "svlan_id")) { 1572 if (!strcmp(name, "svlan_id")) {
1564 len = num_arg(&user_buffer[i], 4, &value); 1573 len = num_arg(&user_buffer[i], 4, &value);
1565 if (len < 0) { 1574 if (len < 0)
1566 return len; 1575 return len;
1567 } 1576
1568 i += len; 1577 i += len;
1569 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { 1578 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
1570 pkt_dev->svlan_id = value; /* turn on SVLAN */ 1579 pkt_dev->svlan_id = value; /* turn on SVLAN */
@@ -1589,9 +1598,9 @@ static ssize_t pktgen_if_write(struct file *file,
1589 1598
1590 if (!strcmp(name, "svlan_p")) { 1599 if (!strcmp(name, "svlan_p")) {
1591 len = num_arg(&user_buffer[i], 1, &value); 1600 len = num_arg(&user_buffer[i], 1, &value);
1592 if (len < 0) { 1601 if (len < 0)
1593 return len; 1602 return len;
1594 } 1603
1595 i += len; 1604 i += len;
1596 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { 1605 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
1597 pkt_dev->svlan_p = value; 1606 pkt_dev->svlan_p = value;
@@ -1604,9 +1613,9 @@ static ssize_t pktgen_if_write(struct file *file,
1604 1613
1605 if (!strcmp(name, "svlan_cfi")) { 1614 if (!strcmp(name, "svlan_cfi")) {
1606 len = num_arg(&user_buffer[i], 1, &value); 1615 len = num_arg(&user_buffer[i], 1, &value);
1607 if (len < 0) { 1616 if (len < 0)
1608 return len; 1617 return len;
1609 } 1618
1610 i += len; 1619 i += len;
1611 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { 1620 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
1612 pkt_dev->svlan_cfi = value; 1621 pkt_dev->svlan_cfi = value;
@@ -1620,9 +1629,9 @@ static ssize_t pktgen_if_write(struct file *file,
1620 if (!strcmp(name, "tos")) { 1629 if (!strcmp(name, "tos")) {
1621 __u32 tmp_value = 0; 1630 __u32 tmp_value = 0;
1622 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1631 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1623 if (len < 0) { 1632 if (len < 0)
1624 return len; 1633 return len;
1625 } 1634
1626 i += len; 1635 i += len;
1627 if (len == 2) { 1636 if (len == 2) {
1628 pkt_dev->tos = tmp_value; 1637 pkt_dev->tos = tmp_value;
@@ -1636,9 +1645,9 @@ static ssize_t pktgen_if_write(struct file *file,
1636 if (!strcmp(name, "traffic_class")) { 1645 if (!strcmp(name, "traffic_class")) {
1637 __u32 tmp_value = 0; 1646 __u32 tmp_value = 0;
1638 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1647 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1639 if (len < 0) { 1648 if (len < 0)
1640 return len; 1649 return len;
1641 } 1650
1642 i += len; 1651 i += len;
1643 if (len == 2) { 1652 if (len == 2) {
1644 pkt_dev->traffic_class = tmp_value; 1653 pkt_dev->traffic_class = tmp_value;
@@ -1670,7 +1679,7 @@ static const struct file_operations pktgen_if_fops = {
1670static int pktgen_thread_show(struct seq_file *seq, void *v) 1679static int pktgen_thread_show(struct seq_file *seq, void *v)
1671{ 1680{
1672 struct pktgen_thread *t = seq->private; 1681 struct pktgen_thread *t = seq->private;
1673 struct pktgen_dev *pkt_dev; 1682 const struct pktgen_dev *pkt_dev;
1674 1683
1675 BUG_ON(!t); 1684 BUG_ON(!t);
1676 1685
@@ -1873,8 +1882,10 @@ static void pktgen_change_name(struct net_device *dev)
1873 1882
1874 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 1883 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
1875 1884
1876 pkt_dev->entry = create_proc_entry(dev->name, 0600, 1885 pkt_dev->entry = proc_create_data(dev->name, 0600,
1877 pg_proc_dir); 1886 pg_proc_dir,
1887 &pktgen_if_fops,
1888 pkt_dev);
1878 if (!pkt_dev->entry) 1889 if (!pkt_dev->entry)
1879 printk(KERN_ERR "pktgen: can't move proc " 1890 printk(KERN_ERR "pktgen: can't move proc "
1880 " entry for '%s'\n", dev->name); 1891 " entry for '%s'\n", dev->name);
@@ -1908,13 +1919,14 @@ static int pktgen_device_event(struct notifier_block *unused,
1908 return NOTIFY_DONE; 1919 return NOTIFY_DONE;
1909} 1920}
1910 1921
1911static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname) 1922static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
1923 const char *ifname)
1912{ 1924{
1913 char b[IFNAMSIZ+5]; 1925 char b[IFNAMSIZ+5];
1914 int i = 0; 1926 int i = 0;
1915 1927
1916 for(i=0; ifname[i] != '@'; i++) { 1928 for (i = 0; ifname[i] != '@'; i++) {
1917 if(i == IFNAMSIZ) 1929 if (i == IFNAMSIZ)
1918 break; 1930 break;
1919 1931
1920 b[i] = ifname[i]; 1932 b[i] = ifname[i];
@@ -1981,7 +1993,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1981 printk(KERN_WARNING "pktgen: WARNING: Requested " 1993 printk(KERN_WARNING "pktgen: WARNING: Requested "
1982 "queue_map_min (zero-based) (%d) exceeds valid range " 1994 "queue_map_min (zero-based) (%d) exceeds valid range "
1983 "[0 - %d] for (%d) queues on %s, resetting\n", 1995 "[0 - %d] for (%d) queues on %s, resetting\n",
1984 pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq, 1996 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
1985 pkt_dev->odev->name); 1997 pkt_dev->odev->name);
1986 pkt_dev->queue_map_min = ntxq - 1; 1998 pkt_dev->queue_map_min = ntxq - 1;
1987 } 1999 }
@@ -1989,7 +2001,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1989 printk(KERN_WARNING "pktgen: WARNING: Requested " 2001 printk(KERN_WARNING "pktgen: WARNING: Requested "
1990 "queue_map_max (zero-based) (%d) exceeds valid range " 2002 "queue_map_max (zero-based) (%d) exceeds valid range "
1991 "[0 - %d] for (%d) queues on %s, resetting\n", 2003 "[0 - %d] for (%d) queues on %s, resetting\n",
1992 pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq, 2004 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
1993 pkt_dev->odev->name); 2005 pkt_dev->odev->name);
1994 pkt_dev->queue_map_max = ntxq - 1; 2006 pkt_dev->queue_map_max = ntxq - 1;
1995 } 2007 }
@@ -2030,7 +2042,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2030 */ 2042 */
2031 2043
2032 rcu_read_lock(); 2044 rcu_read_lock();
2033 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { 2045 idev = __in6_dev_get(pkt_dev->odev);
2046 if (idev) {
2034 struct inet6_ifaddr *ifp; 2047 struct inet6_ifaddr *ifp;
2035 2048
2036 read_lock_bh(&idev->lock); 2049 read_lock_bh(&idev->lock);
@@ -2089,27 +2102,40 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2089 pkt_dev->nflows = 0; 2102 pkt_dev->nflows = 0;
2090} 2103}
2091 2104
2092static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) 2105
2106static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2093{ 2107{
2094 __u64 start; 2108 ktime_t start;
2095 __u64 now; 2109 s32 remaining;
2110 struct hrtimer_sleeper t;
2111
2112 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2113 hrtimer_set_expires(&t.timer, spin_until);
2114
2115 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
2116 if (remaining <= 0)
2117 return;
2096 2118
2097 start = now = getCurUs(); 2119 start = ktime_now();
2098 while (now < spin_until_us) { 2120 if (remaining < 100)
2099 /* TODO: optimize sleeping behavior */ 2121 udelay(remaining); /* really small just spin */
2100 if (spin_until_us - now > jiffies_to_usecs(1) + 1) 2122 else {
2101 schedule_timeout_interruptible(1); 2123 /* see do_nanosleep */
2102 else if (spin_until_us - now > 100) { 2124 hrtimer_init_sleeper(&t, current);
2103 if (!pkt_dev->running) 2125 do {
2104 return; 2126 set_current_state(TASK_INTERRUPTIBLE);
2105 if (need_resched()) 2127 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
2128 if (!hrtimer_active(&t.timer))
2129 t.task = NULL;
2130
2131 if (likely(t.task))
2106 schedule(); 2132 schedule();
2107 }
2108 2133
2109 now = getCurUs(); 2134 hrtimer_cancel(&t.timer);
2135 } while (t.task && pkt_dev->running && !signal_pending(current));
2136 __set_current_state(TASK_RUNNING);
2110 } 2137 }
2111 2138 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
2112 pkt_dev->idle_acc += now - start;
2113} 2139}
2114 2140
2115static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) 2141static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -2120,13 +2146,9 @@ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2120 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); 2146 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2121} 2147}
2122 2148
2123static inline int f_seen(struct pktgen_dev *pkt_dev, int flow) 2149static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
2124{ 2150{
2125 2151 return !!(pkt_dev->flows[flow].flags & F_INIT);
2126 if (pkt_dev->flows[flow].flags & F_INIT)
2127 return 1;
2128 else
2129 return 0;
2130} 2152}
2131 2153
2132static inline int f_pick(struct pktgen_dev *pkt_dev) 2154static inline int f_pick(struct pktgen_dev *pkt_dev)
@@ -2174,7 +2196,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2174 if (x) { 2196 if (x) {
2175 pkt_dev->flows[flow].x = x; 2197 pkt_dev->flows[flow].x = x;
2176 set_pkt_overhead(pkt_dev); 2198 set_pkt_overhead(pkt_dev);
2177 pkt_dev->pkt_overhead+=x->props.header_len; 2199 pkt_dev->pkt_overhead += x->props.header_len;
2178 } 2200 }
2179 2201
2180 } 2202 }
@@ -2313,18 +2335,18 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2313 2335
2314 if (!(pkt_dev->flags & F_IPV6)) { 2336 if (!(pkt_dev->flags & F_IPV6)) {
2315 2337
2316 if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = 2338 imn = ntohl(pkt_dev->saddr_min);
2317 ntohl(pkt_dev-> 2339 imx = ntohl(pkt_dev->saddr_max);
2318 saddr_max))) { 2340 if (imn < imx) {
2319 __u32 t; 2341 __u32 t;
2320 if (pkt_dev->flags & F_IPSRC_RND) 2342 if (pkt_dev->flags & F_IPSRC_RND)
2321 t = random32() % (imx - imn) + imn; 2343 t = random32() % (imx - imn) + imn;
2322 else { 2344 else {
2323 t = ntohl(pkt_dev->cur_saddr); 2345 t = ntohl(pkt_dev->cur_saddr);
2324 t++; 2346 t++;
2325 if (t > imx) { 2347 if (t > imx)
2326 t = imn; 2348 t = imn;
2327 } 2349
2328 } 2350 }
2329 pkt_dev->cur_saddr = htonl(t); 2351 pkt_dev->cur_saddr = htonl(t);
2330 } 2352 }
@@ -2435,14 +2457,14 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2435 if (err) 2457 if (err)
2436 goto error; 2458 goto error;
2437 2459
2438 x->curlft.bytes +=skb->len; 2460 x->curlft.bytes += skb->len;
2439 x->curlft.packets++; 2461 x->curlft.packets++;
2440error: 2462error:
2441 spin_unlock(&x->lock); 2463 spin_unlock(&x->lock);
2442 return err; 2464 return err;
2443} 2465}
2444 2466
2445static inline void free_SAs(struct pktgen_dev *pkt_dev) 2467static void free_SAs(struct pktgen_dev *pkt_dev)
2446{ 2468{
2447 if (pkt_dev->cflows) { 2469 if (pkt_dev->cflows) {
2448 /* let go of the SAs if we have them */ 2470 /* let go of the SAs if we have them */
@@ -2457,7 +2479,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
2457 } 2479 }
2458} 2480}
2459 2481
2460static inline int process_ipsec(struct pktgen_dev *pkt_dev, 2482static int process_ipsec(struct pktgen_dev *pkt_dev,
2461 struct sk_buff *skb, __be16 protocol) 2483 struct sk_buff *skb, __be16 protocol)
2462{ 2484{
2463 if (pkt_dev->flags & F_IPSEC_ON) { 2485 if (pkt_dev->flags & F_IPSEC_ON) {
@@ -2467,11 +2489,11 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2467 int ret; 2489 int ret;
2468 __u8 *eth; 2490 __u8 *eth;
2469 nhead = x->props.header_len - skb_headroom(skb); 2491 nhead = x->props.header_len - skb_headroom(skb);
2470 if (nhead >0) { 2492 if (nhead > 0) {
2471 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2493 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2472 if (ret < 0) { 2494 if (ret < 0) {
2473 printk(KERN_ERR "Error expanding " 2495 printk(KERN_ERR "Error expanding "
2474 "ipsec packet %d\n",ret); 2496 "ipsec packet %d\n", ret);
2475 goto err; 2497 goto err;
2476 } 2498 }
2477 } 2499 }
@@ -2481,13 +2503,13 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2481 ret = pktgen_output_ipsec(skb, pkt_dev); 2503 ret = pktgen_output_ipsec(skb, pkt_dev);
2482 if (ret) { 2504 if (ret) {
2483 printk(KERN_ERR "Error creating ipsec " 2505 printk(KERN_ERR "Error creating ipsec "
2484 "packet %d\n",ret); 2506 "packet %d\n", ret);
2485 goto err; 2507 goto err;
2486 } 2508 }
2487 /* restore ll */ 2509 /* restore ll */
2488 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2510 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2489 memcpy(eth, pkt_dev->hh, 12); 2511 memcpy(eth, pkt_dev->hh, 12);
2490 *(u16 *) & eth[12] = protocol; 2512 *(u16 *) &eth[12] = protocol;
2491 } 2513 }
2492 } 2514 }
2493 return 1; 2515 return 1;
@@ -2500,9 +2522,9 @@ err:
2500static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2522static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2501{ 2523{
2502 unsigned i; 2524 unsigned i;
2503 for (i = 0; i < pkt_dev->nr_labels; i++) { 2525 for (i = 0; i < pkt_dev->nr_labels; i++)
2504 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2526 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2505 } 2527
2506 mpls--; 2528 mpls--;
2507 *mpls |= MPLS_STACK_BOTTOM; 2529 *mpls |= MPLS_STACK_BOTTOM;
2508} 2530}
@@ -2543,8 +2565,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2543 mod_cur_headers(pkt_dev); 2565 mod_cur_headers(pkt_dev);
2544 2566
2545 datalen = (odev->hard_header_len + 16) & ~0xf; 2567 datalen = (odev->hard_header_len + 16) & ~0xf;
2546 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2568 skb = __netdev_alloc_skb(odev,
2547 pkt_dev->pkt_overhead, GFP_ATOMIC); 2569 pkt_dev->cur_pkt_size + 64
2570 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
2548 if (!skb) { 2571 if (!skb) {
2549 sprintf(pkt_dev->result, "No memory"); 2572 sprintf(pkt_dev->result, "No memory");
2550 return NULL; 2573 return NULL;
@@ -2668,8 +2691,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2668 } 2691 }
2669 } 2692 }
2670 2693
2671 /* Stamp the time, and sequence number, convert them to network byte order */ 2694 /* Stamp the time, and sequence number,
2672 2695 * convert them to network byte order
2696 */
2673 if (pgh) { 2697 if (pgh) {
2674 struct timeval timestamp; 2698 struct timeval timestamp;
2675 2699
@@ -2882,8 +2906,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2882 queue_map = pkt_dev->cur_queue_map; 2906 queue_map = pkt_dev->cur_queue_map;
2883 mod_cur_headers(pkt_dev); 2907 mod_cur_headers(pkt_dev);
2884 2908
2885 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2909 skb = __netdev_alloc_skb(odev,
2886 pkt_dev->pkt_overhead, GFP_ATOMIC); 2910 pkt_dev->cur_pkt_size + 64
2911 + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
2887 if (!skb) { 2912 if (!skb) {
2888 sprintf(pkt_dev->result, "No memory"); 2913 sprintf(pkt_dev->result, "No memory");
2889 return NULL; 2914 return NULL;
@@ -2922,7 +2947,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2922 udph = udp_hdr(skb); 2947 udph = udp_hdr(skb);
2923 2948
2924 memcpy(eth, pkt_dev->hh, 12); 2949 memcpy(eth, pkt_dev->hh, 12);
2925 *(__be16 *) & eth[12] = protocol; 2950 *(__be16 *) &eth[12] = protocol;
2926 2951
2927 /* Eth + IPh + UDPh + mpls */ 2952 /* Eth + IPh + UDPh + mpls */
2928 datalen = pkt_dev->cur_pkt_size - 14 - 2953 datalen = pkt_dev->cur_pkt_size - 14 -
@@ -3016,8 +3041,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3016 } 3041 }
3017 } 3042 }
3018 3043
3019 /* Stamp the time, and sequence number, convert them to network byte order */ 3044 /* Stamp the time, and sequence number,
3020 /* should we update cloned packets too ? */ 3045 * convert them to network byte order
3046 * should we update cloned packets too ?
3047 */
3021 if (pgh) { 3048 if (pgh) {
3022 struct timeval timestamp; 3049 struct timeval timestamp;
3023 3050
@@ -3033,8 +3060,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3033 return skb; 3060 return skb;
3034} 3061}
3035 3062
3036static inline struct sk_buff *fill_packet(struct net_device *odev, 3063static struct sk_buff *fill_packet(struct net_device *odev,
3037 struct pktgen_dev *pkt_dev) 3064 struct pktgen_dev *pkt_dev)
3038{ 3065{
3039 if (pkt_dev->flags & F_IPV6) 3066 if (pkt_dev->flags & F_IPV6)
3040 return fill_packet_ipv6(odev, pkt_dev); 3067 return fill_packet_ipv6(odev, pkt_dev);
@@ -3072,9 +3099,9 @@ static void pktgen_run(struct pktgen_thread *t)
3072 pktgen_clear_counters(pkt_dev); 3099 pktgen_clear_counters(pkt_dev);
3073 pkt_dev->running = 1; /* Cranke yeself! */ 3100 pkt_dev->running = 1; /* Cranke yeself! */
3074 pkt_dev->skb = NULL; 3101 pkt_dev->skb = NULL;
3075 pkt_dev->started_at = getCurUs(); 3102 pkt_dev->started_at =
3076 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 3103 pkt_dev->next_tx = ktime_now();
3077 pkt_dev->next_tx_ns = 0; 3104
3078 set_pkt_overhead(pkt_dev); 3105 set_pkt_overhead(pkt_dev);
3079 3106
3080 strcpy(pkt_dev->result, "Starting"); 3107 strcpy(pkt_dev->result, "Starting");
@@ -3101,17 +3128,14 @@ static void pktgen_stop_all_threads_ifs(void)
3101 mutex_unlock(&pktgen_thread_lock); 3128 mutex_unlock(&pktgen_thread_lock);
3102} 3129}
3103 3130
3104static int thread_is_running(struct pktgen_thread *t) 3131static int thread_is_running(const struct pktgen_thread *t)
3105{ 3132{
3106 struct pktgen_dev *pkt_dev; 3133 const struct pktgen_dev *pkt_dev;
3107 int res = 0;
3108 3134
3109 list_for_each_entry(pkt_dev, &t->if_list, list) 3135 list_for_each_entry(pkt_dev, &t->if_list, list)
3110 if (pkt_dev->running) { 3136 if (pkt_dev->running)
3111 res = 1; 3137 return 1;
3112 break; 3138 return 0;
3113 }
3114 return res;
3115} 3139}
3116 3140
3117static int pktgen_wait_thread_run(struct pktgen_thread *t) 3141static int pktgen_wait_thread_run(struct pktgen_thread *t)
@@ -3168,7 +3192,8 @@ static void pktgen_run_all_threads(void)
3168 3192
3169 mutex_unlock(&pktgen_thread_lock); 3193 mutex_unlock(&pktgen_thread_lock);
3170 3194
3171 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 3195 /* Propagate thread->control */
3196 schedule_timeout_interruptible(msecs_to_jiffies(125));
3172 3197
3173 pktgen_wait_all_threads_run(); 3198 pktgen_wait_all_threads_run();
3174} 3199}
@@ -3186,35 +3211,29 @@ static void pktgen_reset_all_threads(void)
3186 3211
3187 mutex_unlock(&pktgen_thread_lock); 3212 mutex_unlock(&pktgen_thread_lock);
3188 3213
3189 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 3214 /* Propagate thread->control */
3215 schedule_timeout_interruptible(msecs_to_jiffies(125));
3190 3216
3191 pktgen_wait_all_threads_run(); 3217 pktgen_wait_all_threads_run();
3192} 3218}
3193 3219
3194static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3220static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3195{ 3221{
3196 __u64 total_us, bps, mbps, pps, idle; 3222 __u64 bps, mbps, pps;
3197 char *p = pkt_dev->result; 3223 char *p = pkt_dev->result;
3198 3224 ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
3199 total_us = pkt_dev->stopped_at - pkt_dev->started_at; 3225 pkt_dev->started_at);
3200 3226 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3201 idle = pkt_dev->idle_acc; 3227
3202 3228 p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
3203 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 3229 (unsigned long long)ktime_to_us(elapsed),
3204 (unsigned long long)total_us, 3230 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3205 (unsigned long long)(total_us - idle), 3231 (unsigned long long)ktime_to_us(idle),
3206 (unsigned long long)idle,
3207 (unsigned long long)pkt_dev->sofar, 3232 (unsigned long long)pkt_dev->sofar,
3208 pkt_dev->cur_pkt_size, nr_frags); 3233 pkt_dev->cur_pkt_size, nr_frags);
3209 3234
3210 pps = pkt_dev->sofar * USEC_PER_SEC; 3235 pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
3211 3236 ktime_to_ns(elapsed));
3212 while ((total_us >> 32) != 0) {
3213 pps >>= 1;
3214 total_us >>= 1;
3215 }
3216
3217 do_div(pps, total_us);
3218 3237
3219 bps = pps * 8 * pkt_dev->cur_pkt_size; 3238 bps = pps * 8 * pkt_dev->cur_pkt_size;
3220 3239
@@ -3228,7 +3247,6 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3228} 3247}
3229 3248
3230/* Set stopped-at timer, remove from running list, do counters & statistics */ 3249/* Set stopped-at timer, remove from running list, do counters & statistics */
3231
3232static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 3250static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3233{ 3251{
3234 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3252 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
@@ -3239,7 +3257,9 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3239 return -EINVAL; 3257 return -EINVAL;
3240 } 3258 }
3241 3259
3242 pkt_dev->stopped_at = getCurUs(); 3260 kfree_skb(pkt_dev->skb);
3261 pkt_dev->skb = NULL;
3262 pkt_dev->stopped_at = ktime_now();
3243 pkt_dev->running = 0; 3263 pkt_dev->running = 0;
3244 3264
3245 show_results(pkt_dev, nr_frags); 3265 show_results(pkt_dev, nr_frags);
@@ -3258,7 +3278,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
3258 continue; 3278 continue;
3259 if (best == NULL) 3279 if (best == NULL)
3260 best = pkt_dev; 3280 best = pkt_dev;
3261 else if (pkt_dev->next_tx_us < best->next_tx_us) 3281 else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
3262 best = pkt_dev; 3282 best = pkt_dev;
3263 } 3283 }
3264 if_unlock(t); 3284 if_unlock(t);
@@ -3275,9 +3295,6 @@ static void pktgen_stop(struct pktgen_thread *t)
3275 3295
3276 list_for_each_entry(pkt_dev, &t->if_list, list) { 3296 list_for_each_entry(pkt_dev, &t->if_list, list) {
3277 pktgen_stop_device(pkt_dev); 3297 pktgen_stop_device(pkt_dev);
3278 kfree_skb(pkt_dev->skb);
3279
3280 pkt_dev->skb = NULL;
3281 } 3298 }
3282 3299
3283 if_unlock(t); 3300 if_unlock(t);
@@ -3348,30 +3365,37 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
3348 mutex_unlock(&pktgen_thread_lock); 3365 mutex_unlock(&pktgen_thread_lock);
3349} 3366}
3350 3367
3351static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 3368static void idle(struct pktgen_dev *pkt_dev)
3369{
3370 ktime_t idle_start = ktime_now();
3371
3372 if (need_resched())
3373 schedule();
3374 else
3375 cpu_relax();
3376
3377 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
3378}
3379
3380
3381static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3352{ 3382{
3353 struct net_device *odev = pkt_dev->odev; 3383 struct net_device *odev = pkt_dev->odev;
3354 int (*xmit)(struct sk_buff *, struct net_device *) 3384 netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
3355 = odev->netdev_ops->ndo_start_xmit; 3385 = odev->netdev_ops->ndo_start_xmit;
3356 struct netdev_queue *txq; 3386 struct netdev_queue *txq;
3357 __u64 idle_start = 0;
3358 u16 queue_map; 3387 u16 queue_map;
3359 int ret; 3388 int ret;
3360 3389
3361 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 3390 if (pkt_dev->delay) {
3362 u64 now; 3391 spin(pkt_dev, pkt_dev->next_tx);
3363
3364 now = getCurUs();
3365 if (now < pkt_dev->next_tx_us)
3366 spin(pkt_dev, pkt_dev->next_tx_us);
3367 3392
3368 /* This is max DELAY, this has special meaning of 3393 /* This is max DELAY, this has special meaning of
3369 * "never transmit" 3394 * "never transmit"
3370 */ 3395 */
3371 if (pkt_dev->delay_us == 0x7FFFFFFF) { 3396 if (pkt_dev->delay == ULLONG_MAX) {
3372 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; 3397 pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
3373 pkt_dev->next_tx_ns = pkt_dev->delay_ns; 3398 return;
3374 goto out;
3375 } 3399 }
3376 } 3400 }
3377 3401
@@ -3383,47 +3407,32 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3383 } 3407 }
3384 3408
3385 txq = netdev_get_tx_queue(odev, queue_map); 3409 txq = netdev_get_tx_queue(odev, queue_map);
3386 if (netif_tx_queue_stopped(txq) || 3410 /* Did we saturate the queue already? */
3387 netif_tx_queue_frozen(txq) || 3411 if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
3388 need_resched()) { 3412 /* If device is down, then all queues are permnantly frozen */
3389 idle_start = getCurUs(); 3413 if (netif_running(odev))
3390 3414 idle(pkt_dev);
3391 if (!netif_running(odev)) { 3415 else
3392 pktgen_stop_device(pkt_dev); 3416 pktgen_stop_device(pkt_dev);
3393 kfree_skb(pkt_dev->skb); 3417 return;
3394 pkt_dev->skb = NULL;
3395 goto out;
3396 }
3397 if (need_resched())
3398 schedule();
3399
3400 pkt_dev->idle_acc += getCurUs() - idle_start;
3401
3402 if (netif_tx_queue_stopped(txq) ||
3403 netif_tx_queue_frozen(txq)) {
3404 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3405 pkt_dev->next_tx_ns = 0;
3406 goto out; /* Try the next interface */
3407 }
3408 } 3418 }
3409 3419
3410 if (pkt_dev->last_ok || !pkt_dev->skb) { 3420 if (!pkt_dev->skb || (pkt_dev->last_ok &&
3411 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) 3421 ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
3412 || (!pkt_dev->skb)) { 3422 /* build a new pkt */
3413 /* build a new pkt */ 3423 kfree_skb(pkt_dev->skb);
3414 kfree_skb(pkt_dev->skb);
3415 3424
3416 pkt_dev->skb = fill_packet(odev, pkt_dev); 3425 pkt_dev->skb = fill_packet(odev, pkt_dev);
3417 if (pkt_dev->skb == NULL) { 3426 if (pkt_dev->skb == NULL) {
3418 printk(KERN_ERR "pktgen: ERROR: couldn't " 3427 printk(KERN_ERR "pktgen: ERROR: couldn't "
3419 "allocate skb in fill_packet.\n"); 3428 "allocate skb in fill_packet.\n");
3420 schedule(); 3429 schedule();
3421 pkt_dev->clone_count--; /* back out increment, OOM */ 3430 pkt_dev->clone_count--; /* back out increment, OOM */
3422 goto out; 3431 return;
3423 }
3424 pkt_dev->allocated_skbs++;
3425 pkt_dev->clone_count = 0; /* reset counter */
3426 } 3432 }
3433
3434 pkt_dev->allocated_skbs++;
3435 pkt_dev->clone_count = 0; /* reset counter */
3427 } 3436 }
3428 3437
3429 /* fill_packet() might have changed the queue */ 3438 /* fill_packet() might have changed the queue */
@@ -3431,73 +3440,53 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3431 txq = netdev_get_tx_queue(odev, queue_map); 3440 txq = netdev_get_tx_queue(odev, queue_map);
3432 3441
3433 __netif_tx_lock_bh(txq); 3442 __netif_tx_lock_bh(txq);
3434 if (!netif_tx_queue_stopped(txq) && 3443 if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
3435 !netif_tx_queue_frozen(txq)) { 3444 pkt_dev->last_ok = 0;
3436 3445 else {
3437 atomic_inc(&(pkt_dev->skb->users)); 3446 atomic_inc(&(pkt_dev->skb->users));
3438 retry_now: 3447
3448 retry_now:
3439 ret = (*xmit)(pkt_dev->skb, odev); 3449 ret = (*xmit)(pkt_dev->skb, odev);
3440 if (likely(ret == NETDEV_TX_OK)) { 3450 switch (ret) {
3451 case NETDEV_TX_OK:
3441 txq_trans_update(txq); 3452 txq_trans_update(txq);
3442 pkt_dev->last_ok = 1; 3453 pkt_dev->last_ok = 1;
3443 pkt_dev->sofar++; 3454 pkt_dev->sofar++;
3444 pkt_dev->seq_num++; 3455 pkt_dev->seq_num++;
3445 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 3456 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
3446 3457 break;
3447 } else if (ret == NETDEV_TX_LOCKED 3458 case NETDEV_TX_LOCKED:
3448 && (odev->features & NETIF_F_LLTX)) {
3449 cpu_relax(); 3459 cpu_relax();
3450 goto retry_now; 3460 goto retry_now;
3451 } else { /* Retry it next time */ 3461 default: /* Drivers are not supposed to return other values! */
3452 3462 if (net_ratelimit())
3453 atomic_dec(&(pkt_dev->skb->users)); 3463 pr_info("pktgen: %s xmit error: %d\n",
3454 3464 odev->name, ret);
3455 if (debug && net_ratelimit())
3456 printk(KERN_INFO "pktgen: Hard xmit error\n");
3457
3458 pkt_dev->errors++; 3465 pkt_dev->errors++;
3466 /* fallthru */
3467 case NETDEV_TX_BUSY:
3468 /* Retry it next time */
3469 atomic_dec(&(pkt_dev->skb->users));
3459 pkt_dev->last_ok = 0; 3470 pkt_dev->last_ok = 0;
3460 } 3471 }
3461 3472
3462 pkt_dev->next_tx_us = getCurUs(); 3473 if (pkt_dev->delay)
3463 pkt_dev->next_tx_ns = 0; 3474 pkt_dev->next_tx = ktime_add_ns(ktime_now(),
3464 3475 pkt_dev->delay);
3465 pkt_dev->next_tx_us += pkt_dev->delay_us;
3466 pkt_dev->next_tx_ns += pkt_dev->delay_ns;
3467
3468 if (pkt_dev->next_tx_ns > 1000) {
3469 pkt_dev->next_tx_us++;
3470 pkt_dev->next_tx_ns -= 1000;
3471 }
3472 } 3476 }
3473
3474 else { /* Retry it next time */
3475 pkt_dev->last_ok = 0;
3476 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3477 pkt_dev->next_tx_ns = 0;
3478 }
3479
3480 __netif_tx_unlock_bh(txq); 3477 __netif_tx_unlock_bh(txq);
3481 3478
3482 /* If pkt_dev->count is zero, then run forever */ 3479 /* If pkt_dev->count is zero, then run forever */
3483 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3480 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
3484 if (atomic_read(&(pkt_dev->skb->users)) != 1) { 3481 while (atomic_read(&(pkt_dev->skb->users)) != 1) {
3485 idle_start = getCurUs(); 3482 if (signal_pending(current))
3486 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 3483 break;
3487 if (signal_pending(current)) { 3484 idle(pkt_dev);
3488 break;
3489 }
3490 schedule();
3491 }
3492 pkt_dev->idle_acc += getCurUs() - idle_start;
3493 } 3485 }
3494 3486
3495 /* Done with this */ 3487 /* Done with this */
3496 pktgen_stop_device(pkt_dev); 3488 pktgen_stop_device(pkt_dev);
3497 kfree_skb(pkt_dev->skb);
3498 pkt_dev->skb = NULL;
3499 } 3489 }
3500out:;
3501} 3490}
3502 3491
3503/* 3492/*
@@ -3516,7 +3505,8 @@ static int pktgen_thread_worker(void *arg)
3516 init_waitqueue_head(&t->queue); 3505 init_waitqueue_head(&t->queue);
3517 complete(&t->start_done); 3506 complete(&t->start_done);
3518 3507
3519 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3508 pr_debug("pktgen: starting pktgen/%d: pid=%d\n",
3509 cpu, task_pid_nr(current));
3520 3510
3521 set_current_state(TASK_INTERRUPTIBLE); 3511 set_current_state(TASK_INTERRUPTIBLE);
3522 3512
@@ -3651,8 +3641,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3651 pkt_dev->max_pkt_size = ETH_ZLEN; 3641 pkt_dev->max_pkt_size = ETH_ZLEN;
3652 pkt_dev->nfrags = 0; 3642 pkt_dev->nfrags = 0;
3653 pkt_dev->clone_skb = pg_clone_skb_d; 3643 pkt_dev->clone_skb = pg_clone_skb_d;
3654 pkt_dev->delay_us = pg_delay_d / 1000; 3644 pkt_dev->delay = pg_delay_d;
3655 pkt_dev->delay_ns = pg_delay_d % 1000;
3656 pkt_dev->count = pg_count_d; 3645 pkt_dev->count = pg_count_d;
3657 pkt_dev->sofar = 0; 3646 pkt_dev->sofar = 0;
3658 pkt_dev->udp_src_min = 9; /* sink port */ 3647 pkt_dev->udp_src_min = 9; /* sink port */
@@ -3864,10 +3853,15 @@ static void __exit pg_cleanup(void)
3864module_init(pg_init); 3853module_init(pg_init);
3865module_exit(pg_cleanup); 3854module_exit(pg_cleanup);
3866 3855
3867MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); 3856MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
3868MODULE_DESCRIPTION("Packet Generator tool"); 3857MODULE_DESCRIPTION("Packet Generator tool");
3869MODULE_LICENSE("GPL"); 3858MODULE_LICENSE("GPL");
3859MODULE_VERSION(VERSION);
3870module_param(pg_count_d, int, 0); 3860module_param(pg_count_d, int, 0);
3861MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
3871module_param(pg_delay_d, int, 0); 3862module_param(pg_delay_d, int, 0);
3863MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
3872module_param(pg_clone_skb_d, int, 0); 3864module_param(pg_clone_skb_d, int, 0);
3865MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
3873module_param(debug, int, 0); 3866module_param(debug, int, 0);
3867MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b44775f9f2bf..eb42873f2a3a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -606,7 +606,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
606 int type, u32 pid, u32 seq, u32 change, 606 int type, u32 pid, u32 seq, u32 change,
607 unsigned int flags) 607 unsigned int flags)
608{ 608{
609 struct netdev_queue *txq;
610 struct ifinfomsg *ifm; 609 struct ifinfomsg *ifm;
611 struct nlmsghdr *nlh; 610 struct nlmsghdr *nlh;
612 const struct net_device_stats *stats; 611 const struct net_device_stats *stats;
@@ -637,9 +636,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
637 if (dev->master) 636 if (dev->master)
638 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); 637 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
639 638
640 txq = netdev_get_tx_queue(dev, 0); 639 if (dev->qdisc)
641 if (txq->qdisc_sleeping) 640 NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
642 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
643 641
644 if (dev->ifalias) 642 if (dev->ifalias)
645 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); 643 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
@@ -974,12 +972,20 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
974{ 972{
975 int err; 973 int err;
976 struct net_device *dev; 974 struct net_device *dev;
975 unsigned int num_queues = 1;
976 unsigned int real_num_queues = 1;
977 977
978 if (ops->get_tx_queues) {
979 err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues);
980 if (err)
981 goto err;
982 }
978 err = -ENOMEM; 983 err = -ENOMEM;
979 dev = alloc_netdev(ops->priv_size, ifname, ops->setup); 984 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
980 if (!dev) 985 if (!dev)
981 goto err; 986 goto err;
982 987
988 dev->real_num_tx_queues = real_num_queues;
983 if (strchr(dev->name, '%')) { 989 if (strchr(dev->name, '%')) {
984 err = dev_alloc_name(dev, dev->name); 990 err = dev_alloc_name(dev, dev->name);
985 if (err < 0) 991 if (err < 0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 3ac34ea6ec05..30d5446512f9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1035,6 +1035,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1035 sk->sk_prot = sk->sk_prot_creator = prot; 1035 sk->sk_prot = sk->sk_prot_creator = prot;
1036 sock_lock_init(sk); 1036 sock_lock_init(sk);
1037 sock_net_set(sk, get_net(net)); 1037 sock_net_set(sk, get_net(net));
1038 atomic_set(&sk->sk_wmem_alloc, 1);
1038 } 1039 }
1039 1040
1040 return sk; 1041 return sk;
@@ -1882,7 +1883,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1882 */ 1883 */
1883 smp_wmb(); 1884 smp_wmb();
1884 atomic_set(&sk->sk_refcnt, 1); 1885 atomic_set(&sk->sk_refcnt, 1);
1885 atomic_set(&sk->sk_wmem_alloc, 1);
1886 atomic_set(&sk->sk_drops, 0); 1886 atomic_set(&sk->sk_drops, 0);
1887} 1887}
1888EXPORT_SYMBOL(sock_init_data); 1888EXPORT_SYMBOL(sock_init_data);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 8379496de82b..e0879bfb7dd5 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -64,6 +64,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
64 [DCB_ATTR_CAP] = {.type = NLA_NESTED}, 64 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
65 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, 65 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
66 [DCB_ATTR_BCN] = {.type = NLA_NESTED}, 66 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
67 [DCB_ATTR_APP] = {.type = NLA_NESTED},
67}; 68};
68 69
69/* DCB priority flow control to User Priority nested attributes */ 70/* DCB priority flow control to User Priority nested attributes */
@@ -158,6 +159,13 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
158 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, 159 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
159}; 160};
160 161
162/* DCB APP nested attributes. */
163static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
165 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
167};
168
161/* standard netlink reply call */ 169/* standard netlink reply call */
162static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, 170static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
163 u32 seq, u16 flags) 171 u32 seq, u16 flags)
@@ -536,6 +544,120 @@ static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
536 return ret; 544 return ret;
537} 545}
538 546
547static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
548 u32 pid, u32 seq, u16 flags)
549{
550 struct sk_buff *dcbnl_skb;
551 struct nlmsghdr *nlh;
552 struct dcbmsg *dcb;
553 struct nlattr *app_nest;
554 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
555 u16 id;
556 u8 up, idtype;
557 int ret = -EINVAL;
558
559 if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
560 goto out;
561
562 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
563 dcbnl_app_nest);
564 if (ret)
565 goto out;
566
567 ret = -EINVAL;
568 /* all must be non-null */
569 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
570 (!app_tb[DCB_APP_ATTR_ID]))
571 goto out;
572
573 /* either by eth type or by socket number */
574 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
575 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
576 (idtype != DCB_APP_IDTYPE_PORTNUM))
577 goto out;
578
579 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
580 up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
581
582 /* send this back */
583 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
584 if (!dcbnl_skb)
585 goto out;
586
587 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
588 dcb = NLMSG_DATA(nlh);
589 dcb->dcb_family = AF_UNSPEC;
590 dcb->cmd = DCB_CMD_GAPP;
591
592 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
593 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
594 if (ret)
595 goto out_cancel;
596
597 ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
598 if (ret)
599 goto out_cancel;
600
601 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
602 if (ret)
603 goto out_cancel;
604
605 nla_nest_end(dcbnl_skb, app_nest);
606 nlmsg_end(dcbnl_skb, nlh);
607
608 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
609 if (ret)
610 goto nlmsg_failure;
611
612 goto out;
613
614out_cancel:
615 nla_nest_cancel(dcbnl_skb, app_nest);
616nlmsg_failure:
617 kfree_skb(dcbnl_skb);
618out:
619 return ret;
620}
621
622static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
623 u32 pid, u32 seq, u16 flags)
624{
625 int ret = -EINVAL;
626 u16 id;
627 u8 up, idtype;
628 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
629
630 if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp)
631 goto out;
632
633 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
634 dcbnl_app_nest);
635 if (ret)
636 goto out;
637
638 ret = -EINVAL;
639 /* all must be non-null */
640 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
641 (!app_tb[DCB_APP_ATTR_ID]) ||
642 (!app_tb[DCB_APP_ATTR_PRIORITY]))
643 goto out;
644
645 /* either by eth type or by socket number */
646 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
647 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
648 (idtype != DCB_APP_IDTYPE_PORTNUM))
649 goto out;
650
651 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
652 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
653
654 ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up),
655 RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
656 pid, seq, flags);
657out:
658 return ret;
659}
660
539static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, 661static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
540 u32 pid, u32 seq, u16 flags, int dir) 662 u32 pid, u32 seq, u16 flags, int dir)
541{ 663{
@@ -1093,6 +1215,14 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1093 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, 1215 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1094 nlh->nlmsg_flags); 1216 nlh->nlmsg_flags);
1095 goto out; 1217 goto out;
1218 case DCB_CMD_GAPP:
1219 ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
1220 nlh->nlmsg_flags);
1221 goto out;
1222 case DCB_CMD_SAPP:
1223 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
1224 nlh->nlmsg_flags);
1225 goto out;
1096 default: 1226 default:
1097 goto errout; 1227 goto errout;
1098 } 1228 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a0a36c9e6cce..d01c00de1ad0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -880,7 +880,7 @@ discard_and_relse:
880 goto discard_it; 880 goto discard_it;
881} 881}
882 882
883static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { 883static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
884 .queue_xmit = ip_queue_xmit, 884 .queue_xmit = ip_queue_xmit,
885 .send_check = dccp_v4_send_check, 885 .send_check = dccp_v4_send_check,
886 .rebuild_header = inet_sk_rebuild_header, 886 .rebuild_header = inet_sk_rebuild_header,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3e70faab2989..64f011cc4491 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -35,8 +35,8 @@
35 35
36/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */ 36/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
37 37
38static struct inet_connection_sock_af_ops dccp_ipv6_mapped; 38static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
39static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; 39static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
40 40
41static void dccp_v6_hash(struct sock *sk) 41static void dccp_v6_hash(struct sock *sk)
42{ 42{
@@ -1055,7 +1055,7 @@ failure:
1055 return err; 1055 return err;
1056} 1056}
1057 1057
1058static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { 1058static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1059 .queue_xmit = inet6_csk_xmit, 1059 .queue_xmit = inet6_csk_xmit,
1060 .send_check = dccp_v6_send_check, 1060 .send_check = dccp_v6_send_check,
1061 .rebuild_header = inet6_sk_rebuild_header, 1061 .rebuild_header = inet6_sk_rebuild_header,
@@ -1076,7 +1076,7 @@ static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1076/* 1076/*
1077 * DCCP over IPv4 via INET6 API 1077 * DCCP over IPv4 via INET6 API
1078 */ 1078 */
1079static struct inet_connection_sock_af_ops dccp_ipv6_mapped = { 1079static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1080 .queue_xmit = ip_queue_xmit, 1080 .queue_xmit = ip_queue_xmit,
1081 .send_check = dccp_v4_send_check, 1081 .send_check = dccp_v4_send_check,
1082 .rebuild_header = inet_sk_rebuild_header, 1082 .rebuild_header = inet_sk_rebuild_header,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 37b3b4293ef4..923db06c7e55 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1066,7 +1066,7 @@ static int __init dccp_init(void)
1066 (dccp_hashinfo.ehash_size - 1)) 1066 (dccp_hashinfo.ehash_size - 1))
1067 dccp_hashinfo.ehash_size--; 1067 dccp_hashinfo.ehash_size--;
1068 dccp_hashinfo.ehash = (struct inet_ehash_bucket *) 1068 dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
1069 __get_free_pages(GFP_ATOMIC, ehash_order); 1069 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
1070 } while (!dccp_hashinfo.ehash && --ehash_order > 0); 1070 } while (!dccp_hashinfo.ehash && --ehash_order > 0);
1071 1071
1072 if (!dccp_hashinfo.ehash) { 1072 if (!dccp_hashinfo.ehash) {
@@ -1091,7 +1091,7 @@ static int __init dccp_init(void)
1091 bhash_order > 0) 1091 bhash_order > 0)
1092 continue; 1092 continue;
1093 dccp_hashinfo.bhash = (struct inet_bind_hashbucket *) 1093 dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
1094 __get_free_pages(GFP_ATOMIC, bhash_order); 1094 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
1095 } while (!dccp_hashinfo.bhash && --bhash_order >= 0); 1095 } while (!dccp_hashinfo.bhash && --bhash_order >= 0);
1096 1096
1097 if (!dccp_hashinfo.bhash) { 1097 if (!dccp_hashinfo.bhash) {
@@ -1159,6 +1159,7 @@ static void __exit dccp_fini(void)
1159 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1159 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1160 dccp_ackvec_exit(); 1160 dccp_ackvec_exit();
1161 dccp_sysctl_exit(); 1161 dccp_sysctl_exit();
1162 percpu_counter_destroy(&dccp_orphan_count);
1162} 1163}
1163 1164
1164module_init(dccp_init); 1165module_init(dccp_init);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 923786bd6d01..794b5bf95af1 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -59,7 +59,7 @@ static int dn_phase3_output(struct sk_buff *);
59/* 59/*
60 * For talking to broadcast devices: Ethernet & PPP 60 * For talking to broadcast devices: Ethernet & PPP
61 */ 61 */
62static struct neigh_ops dn_long_ops = { 62static const struct neigh_ops dn_long_ops = {
63 .family = AF_DECnet, 63 .family = AF_DECnet,
64 .error_report = dn_long_error_report, 64 .error_report = dn_long_error_report,
65 .output = dn_long_output, 65 .output = dn_long_output,
@@ -71,7 +71,7 @@ static struct neigh_ops dn_long_ops = {
71/* 71/*
72 * For talking to pointopoint and multidrop devices: DDCMP and X.25 72 * For talking to pointopoint and multidrop devices: DDCMP and X.25
73 */ 73 */
74static struct neigh_ops dn_short_ops = { 74static const struct neigh_ops dn_short_ops = {
75 .family = AF_DECnet, 75 .family = AF_DECnet,
76 .error_report = dn_short_error_report, 76 .error_report = dn_short_error_report,
77 .output = dn_short_output, 77 .output = dn_short_output,
@@ -83,7 +83,7 @@ static struct neigh_ops dn_short_ops = {
83/* 83/*
84 * For talking to DECnet phase III nodes 84 * For talking to DECnet phase III nodes
85 */ 85 */
86static struct neigh_ops dn_phase3_ops = { 86static const struct neigh_ops dn_phase3_ops = {
87 .family = AF_DECnet, 87 .family = AF_DECnet,
88 .error_report = dn_short_error_report, /* Can use short version here */ 88 .error_report = dn_short_error_report, /* Can use short version here */
89 .output = dn_phase3_output, 89 .output = dn_phase3_output,
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 41055f33d28a..4b0ea0540442 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -169,13 +169,13 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds,
169 int port, char *name); 169 int port, char *name);
170 170
171/* tag_dsa.c */ 171/* tag_dsa.c */
172int dsa_xmit(struct sk_buff *skb, struct net_device *dev); 172netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
173 173
174/* tag_edsa.c */ 174/* tag_edsa.c */
175int edsa_xmit(struct sk_buff *skb, struct net_device *dev); 175netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
176 176
177/* tag_trailer.c */ 177/* tag_trailer.c */
178int trailer_xmit(struct sk_buff *skb, struct net_device *dev); 178netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
179 179
180 180
181#endif 181#endif
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 8fa25bafe6ca..cdf2d28a0297 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,7 +15,7 @@
15 15
16#define DSA_HLEN 4 16#define DSA_HLEN 4
17 17
18int dsa_xmit(struct sk_buff *skb, struct net_device *dev) 18netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
19{ 19{
20 struct dsa_slave_priv *p = netdev_priv(dev); 20 struct dsa_slave_priv *p = netdev_priv(dev);
21 u8 *dsa_header; 21 u8 *dsa_header;
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 815607bd286f..8f53948cff4f 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,7 +16,7 @@
16#define DSA_HLEN 4 16#define DSA_HLEN 4
17#define EDSA_HLEN 8 17#define EDSA_HLEN 8
18 18
19int edsa_xmit(struct sk_buff *skb, struct net_device *dev) 19netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
20{ 20{
21 struct dsa_slave_priv *p = netdev_priv(dev); 21 struct dsa_slave_priv *p = netdev_priv(dev);
22 u8 *edsa_header; 22 u8 *edsa_header;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 1c3e30c38b86..a85c829853c0 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,7 +13,7 @@
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include "dsa_priv.h" 14#include "dsa_priv.h"
15 15
16int trailer_xmit(struct sk_buff *skb, struct net_device *dev) 16netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
17{ 17{
18 struct dsa_slave_priv *p = netdev_priv(dev); 18 struct dsa_slave_priv *p = netdev_priv(dev);
19 struct sk_buff *nskb; 19 struct sk_buff *nskb;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index e114da7ca9b8..0e0254fd767d 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
520 if (peer) 520 if (peer)
521 return -EOPNOTSUPP; 521 return -EOPNOTSUPP;
522 522
523 memset(sec, 0, sizeof(*sec));
523 mutex_lock(&econet_mutex); 524 mutex_lock(&econet_mutex);
524 525
525 sk = sock->sk; 526 sk = sock->sk;
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index f99338a26100..4068a9f5113e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o
2nl802154-y := netlink.o nl_policy.o 2nl802154-y := netlink.o nl_policy.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3af_802154-y := af_ieee802154.o raw.o dgram.o
4 4
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 69c8d9207aa7..cd949d5e451b 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -136,7 +136,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
136 unsigned int cmd) 136 unsigned int cmd)
137{ 137{
138 struct ifreq ifr; 138 struct ifreq ifr;
139 int ret = -EINVAL; 139 int ret = -ENOIOCTLCMD;
140 struct net_device *dev; 140 struct net_device *dev;
141 141
142 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 142 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
@@ -146,8 +146,8 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
146 146
147 dev_load(sock_net(sk), ifr.ifr_name); 147 dev_load(sock_net(sk), ifr.ifr_name);
148 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); 148 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
149 if (dev->type == ARPHRD_IEEE802154 || 149
150 dev->type == ARPHRD_IEEE802154_PHY) 150 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
151 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); 151 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
152 152
153 if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) 153 if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 53dd912d52b4..77ae6852b93d 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -40,9 +40,11 @@ static DEFINE_RWLOCK(dgram_lock);
40struct dgram_sock { 40struct dgram_sock {
41 struct sock sk; 41 struct sock sk;
42 42
43 int bound;
44 struct ieee802154_addr src_addr; 43 struct ieee802154_addr src_addr;
45 struct ieee802154_addr dst_addr; 44 struct ieee802154_addr dst_addr;
45
46 unsigned bound:1;
47 unsigned want_ack:1;
46}; 48};
47 49
48static inline struct dgram_sock *dgram_sk(const struct sock *sk) 50static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -50,7 +52,6 @@ static inline struct dgram_sock *dgram_sk(const struct sock *sk)
50 return container_of(sk, struct dgram_sock, sk); 52 return container_of(sk, struct dgram_sock, sk);
51} 53}
52 54
53
54static void dgram_hash(struct sock *sk) 55static void dgram_hash(struct sock *sk)
55{ 56{
56 write_lock_bh(&dgram_lock); 57 write_lock_bh(&dgram_lock);
@@ -73,6 +74,7 @@ static int dgram_init(struct sock *sk)
73 74
74 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; 75 ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
75 ro->dst_addr.pan_id = 0xffff; 76 ro->dst_addr.pan_id = 0xffff;
77 ro->want_ack = 1;
76 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); 78 memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
77 return 0; 79 return 0;
78} 80}
@@ -86,18 +88,18 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
86{ 88{
87 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; 89 struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
88 struct dgram_sock *ro = dgram_sk(sk); 90 struct dgram_sock *ro = dgram_sk(sk);
89 int err = 0; 91 int err = -EINVAL;
90 struct net_device *dev; 92 struct net_device *dev;
91 93
94 lock_sock(sk);
95
92 ro->bound = 0; 96 ro->bound = 0;
93 97
94 if (len < sizeof(*addr)) 98 if (len < sizeof(*addr))
95 return -EINVAL; 99 goto out;
96 100
97 if (addr->family != AF_IEEE802154) 101 if (addr->family != AF_IEEE802154)
98 return -EINVAL; 102 goto out;
99
100 lock_sock(sk);
101 103
102 dev = ieee802154_get_dev(sock_net(sk), &addr->addr); 104 dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
103 if (!dev) { 105 if (!dev) {
@@ -113,6 +115,7 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
113 memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr)); 115 memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr));
114 116
115 ro->bound = 1; 117 ro->bound = 1;
118 err = 0;
116out_put: 119out_put:
117 dev_put(dev); 120 dev_put(dev);
118out: 121out:
@@ -235,7 +238,10 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
235 238
236 skb_reset_network_header(skb); 239 skb_reset_network_header(skb);
237 240
238 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ; 241 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
242 if (ro->want_ack)
243 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
244
239 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); 245 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
240 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, 246 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
241 ro->bound ? &ro->src_addr : NULL, size); 247 ro->bound ? &ro->src_addr : NULL, size);
@@ -377,6 +383,64 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
377 return ret; 383 return ret;
378} 384}
379 385
386static int dgram_getsockopt(struct sock *sk, int level, int optname,
387 char __user *optval, int __user *optlen)
388{
389 struct dgram_sock *ro = dgram_sk(sk);
390
391 int val, len;
392
393 if (level != SOL_IEEE802154)
394 return -EOPNOTSUPP;
395
396 if (get_user(len, optlen))
397 return -EFAULT;
398
399 len = min_t(unsigned int, len, sizeof(int));
400
401 switch (optname) {
402 case WPAN_WANTACK:
403 val = ro->want_ack;
404 break;
405 default:
406 return -ENOPROTOOPT;
407 }
408
409 if (put_user(len, optlen))
410 return -EFAULT;
411 if (copy_to_user(optval, &val, len))
412 return -EFAULT;
413 return 0;
414}
415
416static int dgram_setsockopt(struct sock *sk, int level, int optname,
417 char __user *optval, int __user optlen)
418{
419 struct dgram_sock *ro = dgram_sk(sk);
420 int val;
421 int err = 0;
422
423 if (optlen < sizeof(int))
424 return -EINVAL;
425
426 if (get_user(val, (int __user *)optval))
427 return -EFAULT;
428
429 lock_sock(sk);
430
431 switch (optname) {
432 case WPAN_WANTACK:
433 ro->want_ack = !!val;
434 break;
435 default:
436 err = -ENOPROTOOPT;
437 break;
438 }
439
440 release_sock(sk);
441 return err;
442}
443
380struct proto ieee802154_dgram_prot = { 444struct proto ieee802154_dgram_prot = {
381 .name = "IEEE-802.15.4-MAC", 445 .name = "IEEE-802.15.4-MAC",
382 .owner = THIS_MODULE, 446 .owner = THIS_MODULE,
@@ -391,5 +455,7 @@ struct proto ieee802154_dgram_prot = {
391 .connect = dgram_connect, 455 .connect = dgram_connect,
392 .disconnect = dgram_disconnect, 456 .disconnect = dgram_disconnect,
393 .ioctl = dgram_ioctl, 457 .ioctl = dgram_ioctl,
458 .getsockopt = dgram_getsockopt,
459 .setsockopt = dgram_setsockopt,
394}; 460};
395 461
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index a615b9d13212..2106ecbf0308 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -19,6 +19,7 @@
19 * Written by: 19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org> 20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 * Maxim Osipov <maxim.osipov@siemens.com>
22 */ 23 */
23 24
24#include <linux/kernel.h> 25#include <linux/kernel.h>
@@ -26,9 +27,11 @@
26#include <linux/netdevice.h> 27#include <linux/netdevice.h>
27#include <net/netlink.h> 28#include <net/netlink.h>
28#include <net/genetlink.h> 29#include <net/genetlink.h>
30#include <net/sock.h>
29#include <linux/nl802154.h> 31#include <linux/nl802154.h>
30#include <net/af_ieee802154.h> 32#include <net/af_ieee802154.h>
31#include <net/nl802154.h> 33#include <net/nl802154.h>
34#include <net/ieee802154.h>
32#include <net/ieee802154_netdev.h> 35#include <net/ieee802154_netdev.h>
33 36
34static unsigned int ieee802154_seq_num; 37static unsigned int ieee802154_seq_num;
@@ -73,7 +76,7 @@ static int ieee802154_nl_finish(struct sk_buff *msg)
73 /* XXX: nlh is right at the start of msg */ 76 /* XXX: nlh is right at the start of msg */
74 void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); 77 void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
75 78
76 if (!genlmsg_end(msg, hdr)) 79 if (genlmsg_end(msg, hdr) < 0)
77 goto out; 80 goto out;
78 81
79 return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, 82 return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id,
@@ -229,7 +232,7 @@ nla_put_failure:
229EXPORT_SYMBOL(ieee802154_nl_beacon_indic); 232EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
230 233
231int ieee802154_nl_scan_confirm(struct net_device *dev, 234int ieee802154_nl_scan_confirm(struct net_device *dev,
232 u8 status, u8 scan_type, u32 unscanned, 235 u8 status, u8 scan_type, u32 unscanned, u8 page,
233 u8 *edl/* , struct list_head *pan_desc_list */) 236 u8 *edl/* , struct list_head *pan_desc_list */)
234{ 237{
235 struct sk_buff *msg; 238 struct sk_buff *msg;
@@ -248,6 +251,7 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
248 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 251 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
249 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); 252 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
250 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); 253 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
254 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
251 255
252 if (edl) 256 if (edl)
253 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); 257 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
@@ -260,6 +264,60 @@ nla_put_failure:
260} 264}
261EXPORT_SYMBOL(ieee802154_nl_scan_confirm); 265EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
262 266
267int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
268{
269 struct sk_buff *msg;
270
271 pr_debug("%s\n", __func__);
272
273 msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
274 if (!msg)
275 return -ENOBUFS;
276
277 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
278 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
279 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
280 dev->dev_addr);
281
282 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
283
284 return ieee802154_nl_finish(msg);
285
286nla_put_failure:
287 nlmsg_free(msg);
288 return -ENOBUFS;
289}
290EXPORT_SYMBOL(ieee802154_nl_start_confirm);
291
292static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
293 u32 seq, int flags, struct net_device *dev)
294{
295 void *hdr;
296
297 pr_debug("%s\n", __func__);
298
299 hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags,
300 IEEE802154_LIST_IFACE);
301 if (!hdr)
302 goto out;
303
304 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
305 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
306
307 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
308 dev->dev_addr);
309 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
310 ieee802154_mlme_ops(dev)->get_short_addr(dev));
311 NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
312 ieee802154_mlme_ops(dev)->get_pan_id(dev));
313 return genlmsg_end(msg, hdr);
314
315nla_put_failure:
316 genlmsg_cancel(msg, hdr);
317out:
318 return -EMSGSIZE;
319}
320
263/* Requests from userspace */ 321/* Requests from userspace */
264static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) 322static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
265{ 323{
@@ -272,7 +330,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
272 dev = dev_get_by_name(&init_net, name); 330 dev = dev_get_by_name(&init_net, name);
273 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) 331 } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
274 dev = dev_get_by_index(&init_net, 332 dev = dev_get_by_index(&init_net,
275 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); 333 nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
276 else 334 else
277 return NULL; 335 return NULL;
278 336
@@ -292,6 +350,7 @@ static int ieee802154_associate_req(struct sk_buff *skb,
292{ 350{
293 struct net_device *dev; 351 struct net_device *dev;
294 struct ieee802154_addr addr; 352 struct ieee802154_addr addr;
353 u8 page;
295 int ret = -EINVAL; 354 int ret = -EINVAL;
296 355
297 if (!info->attrs[IEEE802154_ATTR_CHANNEL] || 356 if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
@@ -317,8 +376,14 @@ static int ieee802154_associate_req(struct sk_buff *skb,
317 } 376 }
318 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); 377 addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
319 378
379 if (info->attrs[IEEE802154_ATTR_PAGE])
380 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
381 else
382 page = 0;
383
320 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, 384 ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
321 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), 385 nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
386 page,
322 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); 387 nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
323 388
324 dev_put(dev); 389 dev_put(dev);
@@ -401,6 +466,7 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
401 struct ieee802154_addr addr; 466 struct ieee802154_addr addr;
402 467
403 u8 channel, bcn_ord, sf_ord; 468 u8 channel, bcn_ord, sf_ord;
469 u8 page;
404 int pan_coord, blx, coord_realign; 470 int pan_coord, blx, coord_realign;
405 int ret; 471 int ret;
406 472
@@ -431,7 +497,19 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
431 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); 497 blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
432 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); 498 coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
433 499
434 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, 500 if (info->attrs[IEEE802154_ATTR_PAGE])
501 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
502 else
503 page = 0;
504
505
506 if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
507 ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
508 dev_put(dev);
509 return -EINVAL;
510 }
511
512 ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
435 bcn_ord, sf_ord, pan_coord, blx, coord_realign); 513 bcn_ord, sf_ord, pan_coord, blx, coord_realign);
436 514
437 dev_put(dev); 515 dev_put(dev);
@@ -445,6 +523,7 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
445 u8 type; 523 u8 type;
446 u32 channels; 524 u32 channels;
447 u8 duration; 525 u8 duration;
526 u8 page;
448 527
449 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || 528 if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
450 !info->attrs[IEEE802154_ATTR_CHANNELS] || 529 !info->attrs[IEEE802154_ATTR_CHANNELS] ||
@@ -459,13 +538,80 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
459 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); 538 channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
460 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); 539 duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
461 540
462 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, 541 if (info->attrs[IEEE802154_ATTR_PAGE])
542 page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
543 else
544 page = 0;
545
546
547 ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
463 duration); 548 duration);
464 549
465 dev_put(dev); 550 dev_put(dev);
466 return ret; 551 return ret;
467} 552}
468 553
554static int ieee802154_list_iface(struct sk_buff *skb,
555 struct genl_info *info)
556{
557 /* Request for interface name, index, type, IEEE address,
558 PAN Id, short address */
559 struct sk_buff *msg;
560 struct net_device *dev = NULL;
561 int rc = -ENOBUFS;
562
563 pr_debug("%s\n", __func__);
564
565 dev = ieee802154_nl_get_dev(info);
566 if (!dev)
567 return -ENODEV;
568
569 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
570 if (!msg)
571 goto out_dev;
572
573 rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
574 0, dev);
575 if (rc < 0)
576 goto out_free;
577
578 dev_put(dev);
579
580 return genlmsg_unicast(&init_net, msg, info->snd_pid);
581out_free:
582 nlmsg_free(msg);
583out_dev:
584 dev_put(dev);
585 return rc;
586
587}
588
589static int ieee802154_dump_iface(struct sk_buff *skb,
590 struct netlink_callback *cb)
591{
592 struct net *net = sock_net(skb->sk);
593 struct net_device *dev;
594 int idx;
595 int s_idx = cb->args[0];
596
597 pr_debug("%s\n", __func__);
598
599 idx = 0;
600 for_each_netdev(net, dev) {
601 if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
602 goto cont;
603
604 if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
605 cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
606 break;
607cont:
608 idx++;
609 }
610 cb->args[0] = idx;
611
612 return skb->len;
613}
614
469#define IEEE802154_OP(_cmd, _func) \ 615#define IEEE802154_OP(_cmd, _func) \
470 { \ 616 { \
471 .cmd = _cmd, \ 617 .cmd = _cmd, \
@@ -475,12 +621,22 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
475 .flags = GENL_ADMIN_PERM, \ 621 .flags = GENL_ADMIN_PERM, \
476 } 622 }
477 623
624#define IEEE802154_DUMP(_cmd, _func, _dump) \
625 { \
626 .cmd = _cmd, \
627 .policy = ieee802154_policy, \
628 .doit = _func, \
629 .dumpit = _dump, \
630 }
631
478static struct genl_ops ieee802154_coordinator_ops[] = { 632static struct genl_ops ieee802154_coordinator_ops[] = {
479 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), 633 IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
480 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), 634 IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
481 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), 635 IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
482 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), 636 IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
483 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), 637 IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
638 IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
639 ieee802154_dump_iface),
484}; 640};
485 641
486static int __init ieee802154_nl_init(void) 642static int __init ieee802154_nl_init(void)
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index c7d71d1adcac..2363ebee02e7 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -24,7 +24,7 @@
24 24
25#define NLA_HW_ADDR NLA_U64 25#define NLA_HW_ADDR NLA_U64
26 26
27struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { 27const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, 28 [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, 29 [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
30 30
@@ -33,6 +33,7 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
33 [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, 33 [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
34 [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, 34 [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
35 [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, 35 [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
36 [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
36 [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, 37 [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
37 [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, 38 [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
38 [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, 39 [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
@@ -50,3 +51,4 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
50 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, 51 [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
51 [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, 52 [IEEE802154_ATTR_ED_LIST] = { .len = 27 },
52}; 53};
54
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index ea8d1f15206e..4681501aae93 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -74,8 +74,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len)
74 goto out; 74 goto out;
75 } 75 }
76 76
77 if (dev->type != ARPHRD_IEEE802154_PHY && 77 if (dev->type != ARPHRD_IEEE802154) {
78 dev->type != ARPHRD_IEEE802154) {
79 err = -ENODEV; 78 err = -ENODEV;
80 goto out_put; 79 goto out_put;
81 } 80 }
@@ -238,6 +237,18 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
238 read_unlock(&raw_lock); 237 read_unlock(&raw_lock);
239} 238}
240 239
240static int raw_getsockopt(struct sock *sk, int level, int optname,
241 char __user *optval, int __user *optlen)
242{
243 return -EOPNOTSUPP;
244}
245
246static int raw_setsockopt(struct sock *sk, int level, int optname,
247 char __user *optval, int __user optlen)
248{
249 return -EOPNOTSUPP;
250}
251
241struct proto ieee802154_raw_prot = { 252struct proto ieee802154_raw_prot = {
242 .name = "IEEE-802.15.4-RAW", 253 .name = "IEEE-802.15.4-RAW",
243 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
@@ -250,5 +261,7 @@ struct proto ieee802154_raw_prot = {
250 .unhash = raw_unhash, 261 .unhash = raw_unhash,
251 .connect = raw_connect, 262 .connect = raw_connect,
252 .disconnect = raw_disconnect, 263 .disconnect = raw_disconnect,
264 .getsockopt = raw_getsockopt,
265 .setsockopt = raw_setsockopt,
253}; 266};
254 267
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
new file mode 100644
index 000000000000..f306604da67a
--- /dev/null
+++ b/net/ieee802154/wpan-class.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright (C) 2007, 2008, 2009 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/device.h>
22
23#include <net/wpan-phy.h>
24
25#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
26static ssize_t name ## _show(struct device *dev, \
27 struct device_attribute *attr, char *buf) \
28{ \
29 struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
30 int ret; \
31 \
32 mutex_lock(&phy->pib_lock); \
33 ret = sprintf(buf, format_string "\n", args); \
34 mutex_unlock(&phy->pib_lock); \
35 return ret; \
36}
37
38#define MASTER_SHOW(field, format_string) \
39 MASTER_SHOW_COMPLEX(field, format_string, phy->field)
40
41MASTER_SHOW(current_channel, "%d");
42MASTER_SHOW(current_page, "%d");
43MASTER_SHOW(channels_supported, "%#x");
44MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
45 ((signed char) (phy->transmit_power << 2)) >> 2,
46 (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 );
47MASTER_SHOW(cca_mode, "%d");
48
49static struct device_attribute pmib_attrs[] = {
50 __ATTR_RO(current_channel),
51 __ATTR_RO(current_page),
52 __ATTR_RO(channels_supported),
53 __ATTR_RO(transmit_power),
54 __ATTR_RO(cca_mode),
55 {},
56};
57
58static void wpan_phy_release(struct device *d)
59{
60 struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
61 kfree(phy);
62}
63
64static struct class wpan_phy_class = {
65 .name = "ieee802154",
66 .dev_release = wpan_phy_release,
67 .dev_attrs = pmib_attrs,
68};
69
/* Serializes allocation of phy indices in wpan_phy_alloc(). */
static DEFINE_MUTEX(wpan_phy_mutex);
static int wpan_phy_idx;

/* class_find_device() callback: match a phy by its device name. */
static int wpan_phy_match(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), (const char *)data) == 0;
}
77
78struct wpan_phy *wpan_phy_find(const char *str)
79{
80 struct device *dev;
81
82 if (WARN_ON(!str))
83 return NULL;
84
85 dev = class_find_device(&wpan_phy_class, NULL,
86 (void *)str, wpan_phy_match);
87 if (!dev)
88 return NULL;
89
90 return container_of(dev, struct wpan_phy, dev);
91}
92EXPORT_SYMBOL(wpan_phy_find);
93
/* The index space is exhausted once the counter wraps negative. */
static int wpan_phy_idx_valid(int idx)
{
	return idx >= 0;
}
98
99struct wpan_phy *wpan_phy_alloc(size_t priv_size)
100{
101 struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
102 GFP_KERNEL);
103
104 mutex_lock(&wpan_phy_mutex);
105 phy->idx = wpan_phy_idx++;
106 if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
107 wpan_phy_idx--;
108 mutex_unlock(&wpan_phy_mutex);
109 kfree(phy);
110 return NULL;
111 }
112 mutex_unlock(&wpan_phy_mutex);
113
114 mutex_init(&phy->pib_lock);
115
116 device_initialize(&phy->dev);
117 dev_set_name(&phy->dev, "wpan-phy%d", phy->idx);
118
119 phy->dev.class = &wpan_phy_class;
120
121 return phy;
122}
123EXPORT_SYMBOL(wpan_phy_alloc);
124
125int wpan_phy_register(struct device *parent, struct wpan_phy *phy)
126{
127 phy->dev.parent = parent;
128
129 return device_add(&phy->dev);
130}
131EXPORT_SYMBOL(wpan_phy_register);
132
133void wpan_phy_unregister(struct wpan_phy *phy)
134{
135 device_del(&phy->dev);
136}
137EXPORT_SYMBOL(wpan_phy_unregister);
138
139void wpan_phy_free(struct wpan_phy *phy)
140{
141 put_device(&phy->dev);
142}
143EXPORT_SYMBOL(wpan_phy_free);
144
145static int __init wpan_phy_class_init(void)
146{
147 return class_register(&wpan_phy_class);
148}
149subsys_initcall(wpan_phy_class_init);
150
151static void __exit wpan_phy_class_exit(void)
152{
153 class_unregister(&wpan_phy_class);
154}
155module_exit(wpan_phy_class_exit);
156
157MODULE_DESCRIPTION("IEEE 802.15.4 device class");
158MODULE_LICENSE("GPL v2");
159
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 197d024b2536..6c30a73f03f5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -124,7 +124,6 @@ static struct list_head inetsw[SOCK_MAX];
124static DEFINE_SPINLOCK(inetsw_lock); 124static DEFINE_SPINLOCK(inetsw_lock);
125 125
126struct ipv4_config ipv4_config; 126struct ipv4_config ipv4_config;
127
128EXPORT_SYMBOL(ipv4_config); 127EXPORT_SYMBOL(ipv4_config);
129 128
130/* New destruction routine */ 129/* New destruction routine */
@@ -139,12 +138,12 @@ void inet_sock_destruct(struct sock *sk)
139 sk_mem_reclaim(sk); 138 sk_mem_reclaim(sk);
140 139
141 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { 140 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
142 printk("Attempt to release TCP socket in state %d %p\n", 141 pr_err("Attempt to release TCP socket in state %d %p\n",
143 sk->sk_state, sk); 142 sk->sk_state, sk);
144 return; 143 return;
145 } 144 }
146 if (!sock_flag(sk, SOCK_DEAD)) { 145 if (!sock_flag(sk, SOCK_DEAD)) {
147 printk("Attempt to release alive inet socket %p\n", sk); 146 pr_err("Attempt to release alive inet socket %p\n", sk);
148 return; 147 return;
149 } 148 }
150 149
@@ -157,6 +156,7 @@ void inet_sock_destruct(struct sock *sk)
157 dst_release(sk->sk_dst_cache); 156 dst_release(sk->sk_dst_cache);
158 sk_refcnt_debug_dec(sk); 157 sk_refcnt_debug_dec(sk);
159} 158}
159EXPORT_SYMBOL(inet_sock_destruct);
160 160
161/* 161/*
162 * The routines beyond this point handle the behaviour of an AF_INET 162 * The routines beyond this point handle the behaviour of an AF_INET
@@ -219,6 +219,7 @@ out:
219 release_sock(sk); 219 release_sock(sk);
220 return err; 220 return err;
221} 221}
222EXPORT_SYMBOL(inet_listen);
222 223
223u32 inet_ehash_secret __read_mostly; 224u32 inet_ehash_secret __read_mostly;
224EXPORT_SYMBOL(inet_ehash_secret); 225EXPORT_SYMBOL(inet_ehash_secret);
@@ -435,9 +436,11 @@ int inet_release(struct socket *sock)
435 } 436 }
436 return 0; 437 return 0;
437} 438}
439EXPORT_SYMBOL(inet_release);
438 440
439/* It is off by default, see below. */ 441/* It is off by default, see below. */
440int sysctl_ip_nonlocal_bind __read_mostly; 442int sysctl_ip_nonlocal_bind __read_mostly;
443EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
441 444
442int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 445int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
443{ 446{
@@ -519,6 +522,7 @@ out_release_sock:
519out: 522out:
520 return err; 523 return err;
521} 524}
525EXPORT_SYMBOL(inet_bind);
522 526
523int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, 527int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
524 int addr_len, int flags) 528 int addr_len, int flags)
@@ -532,6 +536,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
532 return -EAGAIN; 536 return -EAGAIN;
533 return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); 537 return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
534} 538}
539EXPORT_SYMBOL(inet_dgram_connect);
535 540
536static long inet_wait_for_connect(struct sock *sk, long timeo) 541static long inet_wait_for_connect(struct sock *sk, long timeo)
537{ 542{
@@ -641,6 +646,7 @@ sock_error:
641 sock->state = SS_DISCONNECTING; 646 sock->state = SS_DISCONNECTING;
642 goto out; 647 goto out;
643} 648}
649EXPORT_SYMBOL(inet_stream_connect);
644 650
645/* 651/*
646 * Accept a pending connection. The TCP layer now gives BSD semantics. 652 * Accept a pending connection. The TCP layer now gives BSD semantics.
@@ -668,6 +674,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
668do_err: 674do_err:
669 return err; 675 return err;
670} 676}
677EXPORT_SYMBOL(inet_accept);
671 678
672 679
673/* 680/*
@@ -699,6 +706,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
699 *uaddr_len = sizeof(*sin); 706 *uaddr_len = sizeof(*sin);
700 return 0; 707 return 0;
701} 708}
709EXPORT_SYMBOL(inet_getname);
702 710
703int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 711int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
704 size_t size) 712 size_t size)
@@ -711,9 +719,11 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
711 719
712 return sk->sk_prot->sendmsg(iocb, sk, msg, size); 720 return sk->sk_prot->sendmsg(iocb, sk, msg, size);
713} 721}
722EXPORT_SYMBOL(inet_sendmsg);
714 723
715 724
716static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) 725static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
726 size_t size, int flags)
717{ 727{
718 struct sock *sk = sock->sk; 728 struct sock *sk = sock->sk;
719 729
@@ -780,6 +790,7 @@ int inet_shutdown(struct socket *sock, int how)
780 release_sock(sk); 790 release_sock(sk);
781 return err; 791 return err;
782} 792}
793EXPORT_SYMBOL(inet_shutdown);
783 794
784/* 795/*
785 * ioctl() calls you can issue on an INET socket. Most of these are 796 * ioctl() calls you can issue on an INET socket. Most of these are
@@ -798,44 +809,45 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
798 struct net *net = sock_net(sk); 809 struct net *net = sock_net(sk);
799 810
800 switch (cmd) { 811 switch (cmd) {
801 case SIOCGSTAMP: 812 case SIOCGSTAMP:
802 err = sock_get_timestamp(sk, (struct timeval __user *)arg); 813 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
803 break; 814 break;
804 case SIOCGSTAMPNS: 815 case SIOCGSTAMPNS:
805 err = sock_get_timestampns(sk, (struct timespec __user *)arg); 816 err = sock_get_timestampns(sk, (struct timespec __user *)arg);
806 break; 817 break;
807 case SIOCADDRT: 818 case SIOCADDRT:
808 case SIOCDELRT: 819 case SIOCDELRT:
809 case SIOCRTMSG: 820 case SIOCRTMSG:
810 err = ip_rt_ioctl(net, cmd, (void __user *)arg); 821 err = ip_rt_ioctl(net, cmd, (void __user *)arg);
811 break; 822 break;
812 case SIOCDARP: 823 case SIOCDARP:
813 case SIOCGARP: 824 case SIOCGARP:
814 case SIOCSARP: 825 case SIOCSARP:
815 err = arp_ioctl(net, cmd, (void __user *)arg); 826 err = arp_ioctl(net, cmd, (void __user *)arg);
816 break; 827 break;
817 case SIOCGIFADDR: 828 case SIOCGIFADDR:
818 case SIOCSIFADDR: 829 case SIOCSIFADDR:
819 case SIOCGIFBRDADDR: 830 case SIOCGIFBRDADDR:
820 case SIOCSIFBRDADDR: 831 case SIOCSIFBRDADDR:
821 case SIOCGIFNETMASK: 832 case SIOCGIFNETMASK:
822 case SIOCSIFNETMASK: 833 case SIOCSIFNETMASK:
823 case SIOCGIFDSTADDR: 834 case SIOCGIFDSTADDR:
824 case SIOCSIFDSTADDR: 835 case SIOCSIFDSTADDR:
825 case SIOCSIFPFLAGS: 836 case SIOCSIFPFLAGS:
826 case SIOCGIFPFLAGS: 837 case SIOCGIFPFLAGS:
827 case SIOCSIFFLAGS: 838 case SIOCSIFFLAGS:
828 err = devinet_ioctl(net, cmd, (void __user *)arg); 839 err = devinet_ioctl(net, cmd, (void __user *)arg);
829 break; 840 break;
830 default: 841 default:
831 if (sk->sk_prot->ioctl) 842 if (sk->sk_prot->ioctl)
832 err = sk->sk_prot->ioctl(sk, cmd, arg); 843 err = sk->sk_prot->ioctl(sk, cmd, arg);
833 else 844 else
834 err = -ENOIOCTLCMD; 845 err = -ENOIOCTLCMD;
835 break; 846 break;
836 } 847 }
837 return err; 848 return err;
838} 849}
850EXPORT_SYMBOL(inet_ioctl);
839 851
840const struct proto_ops inet_stream_ops = { 852const struct proto_ops inet_stream_ops = {
841 .family = PF_INET, 853 .family = PF_INET,
@@ -862,6 +874,7 @@ const struct proto_ops inet_stream_ops = {
862 .compat_getsockopt = compat_sock_common_getsockopt, 874 .compat_getsockopt = compat_sock_common_getsockopt,
863#endif 875#endif
864}; 876};
877EXPORT_SYMBOL(inet_stream_ops);
865 878
866const struct proto_ops inet_dgram_ops = { 879const struct proto_ops inet_dgram_ops = {
867 .family = PF_INET, 880 .family = PF_INET,
@@ -887,6 +900,7 @@ const struct proto_ops inet_dgram_ops = {
887 .compat_getsockopt = compat_sock_common_getsockopt, 900 .compat_getsockopt = compat_sock_common_getsockopt,
888#endif 901#endif
889}; 902};
903EXPORT_SYMBOL(inet_dgram_ops);
890 904
891/* 905/*
892 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without 906 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
@@ -1016,6 +1030,7 @@ out_illegal:
1016 p->type); 1030 p->type);
1017 goto out; 1031 goto out;
1018} 1032}
1033EXPORT_SYMBOL(inet_register_protosw);
1019 1034
1020void inet_unregister_protosw(struct inet_protosw *p) 1035void inet_unregister_protosw(struct inet_protosw *p)
1021{ 1036{
@@ -1031,6 +1046,7 @@ void inet_unregister_protosw(struct inet_protosw *p)
1031 synchronize_net(); 1046 synchronize_net();
1032 } 1047 }
1033} 1048}
1049EXPORT_SYMBOL(inet_unregister_protosw);
1034 1050
1035/* 1051/*
1036 * Shall we try to damage output packets if routing dev changes? 1052 * Shall we try to damage output packets if routing dev changes?
@@ -1141,7 +1157,6 @@ int inet_sk_rebuild_header(struct sock *sk)
1141 1157
1142 return err; 1158 return err;
1143} 1159}
1144
1145EXPORT_SYMBOL(inet_sk_rebuild_header); 1160EXPORT_SYMBOL(inet_sk_rebuild_header);
1146 1161
1147static int inet_gso_send_check(struct sk_buff *skb) 1162static int inet_gso_send_check(struct sk_buff *skb)
@@ -1369,7 +1384,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1369 } 1384 }
1370 return rc; 1385 return rc;
1371} 1386}
1372
1373EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1387EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1374 1388
1375unsigned long snmp_fold_field(void *mib[], int offt) 1389unsigned long snmp_fold_field(void *mib[], int offt)
@@ -1676,19 +1690,3 @@ static int __init ipv4_proc_init(void)
1676 1690
1677MODULE_ALIAS_NETPROTO(PF_INET); 1691MODULE_ALIAS_NETPROTO(PF_INET);
1678 1692
1679EXPORT_SYMBOL(inet_accept);
1680EXPORT_SYMBOL(inet_bind);
1681EXPORT_SYMBOL(inet_dgram_connect);
1682EXPORT_SYMBOL(inet_dgram_ops);
1683EXPORT_SYMBOL(inet_getname);
1684EXPORT_SYMBOL(inet_ioctl);
1685EXPORT_SYMBOL(inet_listen);
1686EXPORT_SYMBOL(inet_register_protosw);
1687EXPORT_SYMBOL(inet_release);
1688EXPORT_SYMBOL(inet_sendmsg);
1689EXPORT_SYMBOL(inet_shutdown);
1690EXPORT_SYMBOL(inet_sock_destruct);
1691EXPORT_SYMBOL(inet_stream_connect);
1692EXPORT_SYMBOL(inet_stream_ops);
1693EXPORT_SYMBOL(inet_unregister_protosw);
1694EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 090e9991ac2a..4e80f336c0cf 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -130,7 +130,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
130static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); 130static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
131static void parp_redo(struct sk_buff *skb); 131static void parp_redo(struct sk_buff *skb);
132 132
133static struct neigh_ops arp_generic_ops = { 133static const struct neigh_ops arp_generic_ops = {
134 .family = AF_INET, 134 .family = AF_INET,
135 .solicit = arp_solicit, 135 .solicit = arp_solicit,
136 .error_report = arp_error_report, 136 .error_report = arp_error_report,
@@ -140,7 +140,7 @@ static struct neigh_ops arp_generic_ops = {
140 .queue_xmit = dev_queue_xmit, 140 .queue_xmit = dev_queue_xmit,
141}; 141};
142 142
143static struct neigh_ops arp_hh_ops = { 143static const struct neigh_ops arp_hh_ops = {
144 .family = AF_INET, 144 .family = AF_INET,
145 .solicit = arp_solicit, 145 .solicit = arp_solicit,
146 .error_report = arp_error_report, 146 .error_report = arp_error_report,
@@ -150,7 +150,7 @@ static struct neigh_ops arp_hh_ops = {
150 .queue_xmit = dev_queue_xmit, 150 .queue_xmit = dev_queue_xmit,
151}; 151};
152 152
153static struct neigh_ops arp_direct_ops = { 153static const struct neigh_ops arp_direct_ops = {
154 .family = AF_INET, 154 .family = AF_INET,
155 .output = dev_queue_xmit, 155 .output = dev_queue_xmit,
156 .connected_output = dev_queue_xmit, 156 .connected_output = dev_queue_xmit,
@@ -158,7 +158,7 @@ static struct neigh_ops arp_direct_ops = {
158 .queue_xmit = dev_queue_xmit, 158 .queue_xmit = dev_queue_xmit,
159}; 159};
160 160
161struct neigh_ops arp_broken_ops = { 161const struct neigh_ops arp_broken_ops = {
162 .family = AF_INET, 162 .family = AF_INET,
163 .solicit = arp_solicit, 163 .solicit = arp_solicit,
164 .error_report = arp_error_report, 164 .error_report = arp_error_report,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index fe3c846b99a6..291bdf50a21f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -48,7 +48,7 @@
48 * Patrick McHardy <kaber@trash.net> 48 * Patrick McHardy <kaber@trash.net>
49 */ 49 */
50 50
51#define VERSION "0.408" 51#define VERSION "0.409"
52 52
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
54#include <asm/system.h> 54#include <asm/system.h>
@@ -325,10 +325,7 @@ static inline void check_tnode(const struct tnode *tn)
325static const int halve_threshold = 25; 325static const int halve_threshold = 25;
326static const int inflate_threshold = 50; 326static const int inflate_threshold = 50;
327static const int halve_threshold_root = 15; 327static const int halve_threshold_root = 15;
328static const int inflate_threshold_root = 25; 328static const int inflate_threshold_root = 30;
329
330static int inflate_threshold_root_fix;
331#define INFLATE_FIX_MAX 10 /* a comment in resize() */
332 329
333static void __alias_free_mem(struct rcu_head *head) 330static void __alias_free_mem(struct rcu_head *head)
334{ 331{
@@ -516,14 +513,14 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
516 rcu_assign_pointer(tn->child[i], n); 513 rcu_assign_pointer(tn->child[i], n);
517} 514}
518 515
516#define MAX_WORK 10
519static struct node *resize(struct trie *t, struct tnode *tn) 517static struct node *resize(struct trie *t, struct tnode *tn)
520{ 518{
521 int i; 519 int i;
522 int err = 0;
523 struct tnode *old_tn; 520 struct tnode *old_tn;
524 int inflate_threshold_use; 521 int inflate_threshold_use;
525 int halve_threshold_use; 522 int halve_threshold_use;
526 int max_resize; 523 int max_work;
527 524
528 if (!tn) 525 if (!tn)
529 return NULL; 526 return NULL;
@@ -538,18 +535,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
538 } 535 }
539 /* One child */ 536 /* One child */
540 if (tn->empty_children == tnode_child_length(tn) - 1) 537 if (tn->empty_children == tnode_child_length(tn) - 1)
541 for (i = 0; i < tnode_child_length(tn); i++) { 538 goto one_child;
542 struct node *n;
543
544 n = tn->child[i];
545 if (!n)
546 continue;
547
548 /* compress one level */
549 node_set_parent(n, NULL);
550 tnode_free_safe(tn);
551 return n;
552 }
553 /* 539 /*
554 * Double as long as the resulting node has a number of 540 * Double as long as the resulting node has a number of
555 * nonempty nodes that are above the threshold. 541 * nonempty nodes that are above the threshold.
@@ -618,15 +604,17 @@ static struct node *resize(struct trie *t, struct tnode *tn)
618 604
619 /* Keep root node larger */ 605 /* Keep root node larger */
620 606
621 if (!tn->parent) 607 if (!node_parent((struct node*) tn)) {
622 inflate_threshold_use = inflate_threshold_root + 608 inflate_threshold_use = inflate_threshold_root;
623 inflate_threshold_root_fix; 609 halve_threshold_use = halve_threshold_root;
624 else 610 }
611 else {
625 inflate_threshold_use = inflate_threshold; 612 inflate_threshold_use = inflate_threshold;
613 halve_threshold_use = halve_threshold;
614 }
626 615
627 err = 0; 616 max_work = MAX_WORK;
628 max_resize = 10; 617 while ((tn->full_children > 0 && max_work-- &&
629 while ((tn->full_children > 0 && max_resize-- &&
630 50 * (tn->full_children + tnode_child_length(tn) 618 50 * (tn->full_children + tnode_child_length(tn)
631 - tn->empty_children) 619 - tn->empty_children)
632 >= inflate_threshold_use * tnode_child_length(tn))) { 620 >= inflate_threshold_use * tnode_child_length(tn))) {
@@ -643,47 +631,19 @@ static struct node *resize(struct trie *t, struct tnode *tn)
643 } 631 }
644 } 632 }
645 633
646 if (max_resize < 0) {
647 if (!tn->parent) {
648 /*
649 * It was observed that during large updates even
650 * inflate_threshold_root = 35 might be needed to avoid
651 * this warning; but it should be temporary, so let's
652 * try to handle this automatically.
653 */
654 if (inflate_threshold_root_fix < INFLATE_FIX_MAX)
655 inflate_threshold_root_fix++;
656 else
657 pr_warning("Fix inflate_threshold_root."
658 " Now=%d size=%d bits fix=%d\n",
659 inflate_threshold_root, tn->bits,
660 inflate_threshold_root_fix);
661 } else {
662 pr_warning("Fix inflate_threshold."
663 " Now=%d size=%d bits\n",
664 inflate_threshold, tn->bits);
665 }
666 } else if (max_resize > 3 && !tn->parent && inflate_threshold_root_fix)
667 inflate_threshold_root_fix--;
668
669 check_tnode(tn); 634 check_tnode(tn);
670 635
636 /* Return if at least one inflate is run */
637 if( max_work != MAX_WORK)
638 return (struct node *) tn;
639
671 /* 640 /*
672 * Halve as long as the number of empty children in this 641 * Halve as long as the number of empty children in this
673 * node is above threshold. 642 * node is above threshold.
674 */ 643 */
675 644
676 645 max_work = MAX_WORK;
677 /* Keep root node larger */ 646 while (tn->bits > 1 && max_work-- &&
678
679 if (!tn->parent)
680 halve_threshold_use = halve_threshold_root;
681 else
682 halve_threshold_use = halve_threshold;
683
684 err = 0;
685 max_resize = 10;
686 while (tn->bits > 1 && max_resize-- &&
687 100 * (tnode_child_length(tn) - tn->empty_children) < 647 100 * (tnode_child_length(tn) - tn->empty_children) <
688 halve_threshold_use * tnode_child_length(tn)) { 648 halve_threshold_use * tnode_child_length(tn)) {
689 649
@@ -698,19 +658,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
698 } 658 }
699 } 659 }
700 660
701 if (max_resize < 0) {
702 if (!tn->parent)
703 pr_warning("Fix halve_threshold_root."
704 " Now=%d size=%d bits\n",
705 halve_threshold_root, tn->bits);
706 else
707 pr_warning("Fix halve_threshold."
708 " Now=%d size=%d bits\n",
709 halve_threshold, tn->bits);
710 }
711 661
712 /* Only one child remains */ 662 /* Only one child remains */
713 if (tn->empty_children == tnode_child_length(tn) - 1) 663 if (tn->empty_children == tnode_child_length(tn) - 1) {
664one_child:
714 for (i = 0; i < tnode_child_length(tn); i++) { 665 for (i = 0; i < tnode_child_length(tn); i++) {
715 struct node *n; 666 struct node *n;
716 667
@@ -724,7 +675,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
724 tnode_free_safe(tn); 675 tnode_free_safe(tn);
725 return n; 676 return n;
726 } 677 }
727 678 }
728 return (struct node *) tn; 679 return (struct node *) tn;
729} 680}
730 681
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 61283f928825..13f0781f35cd 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -218,8 +218,8 @@ void inet_twdr_hangman(unsigned long data)
218 /* We purged the entire slot, anything left? */ 218 /* We purged the entire slot, anything left? */
219 if (twdr->tw_count) 219 if (twdr->tw_count)
220 need_timer = 1; 220 need_timer = 1;
221 twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
221 } 222 }
222 twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
223 if (need_timer) 223 if (need_timer)
224 mod_timer(&twdr->tw_timer, jiffies + twdr->period); 224 mod_timer(&twdr->tw_timer, jiffies + twdr->period);
225out: 225out:
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b902ef55be7f..533afaadefd4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -662,7 +662,7 @@ drop_nolock:
662 return(0); 662 return(0);
663} 663}
664 664
665static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 665static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
666{ 666{
667 struct ip_tunnel *tunnel = netdev_priv(dev); 667 struct ip_tunnel *tunnel = netdev_priv(dev);
668 struct net_device_stats *stats = &tunnel->dev->stats; 668 struct net_device_stats *stats = &tunnel->dev->stats;
@@ -951,7 +951,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
951 addend += 4; 951 addend += 4;
952 } 952 }
953 dev->needed_headroom = addend + hlen; 953 dev->needed_headroom = addend + hlen;
954 mtu -= dev->hard_header_len - addend; 954 mtu -= dev->hard_header_len + addend;
955 955
956 if (mtu < 68) 956 if (mtu < 68)
957 mtu = 68; 957 mtu = 68;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7d0821054729..afae0cbabbf9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1302,7 +1302,7 @@ int ip_push_pending_frames(struct sock *sk)
1302 err = ip_local_out(skb); 1302 err = ip_local_out(skb);
1303 if (err) { 1303 if (err) {
1304 if (err > 0) 1304 if (err > 0)
1305 err = inet->recverr ? net_xmit_errno(err) : 0; 1305 err = net_xmit_errno(err);
1306 if (err) 1306 if (err)
1307 goto error; 1307 goto error;
1308 } 1308 }
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 98075b6d619c..62548cb0923c 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -387,7 +387,7 @@ static int ipip_rcv(struct sk_buff *skb)
387 * and that skb is filled properly by that function. 387 * and that skb is filled properly by that function.
388 */ 388 */
389 389
390static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 390static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
391{ 391{
392 struct ip_tunnel *tunnel = netdev_priv(dev); 392 struct ip_tunnel *tunnel = netdev_priv(dev);
393 struct net_device_stats *stats = &tunnel->dev->stats; 393 struct net_device_stats *stats = &tunnel->dev->stats;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 06c33fb6b321..65d421cf5bc7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -201,7 +201,7 @@ failure:
201 201
202#ifdef CONFIG_IP_PIMSM 202#ifdef CONFIG_IP_PIMSM
203 203
204static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 204static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
205{ 205{
206 struct net *net = dev_net(dev); 206 struct net *net = dev_net(dev);
207 207
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ea50da0649fd..a2e5fc0a15e1 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -22,26 +22,11 @@
22 * as published by the Free Software Foundation; either version 22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version. 23 * 2 of the License, or (at your option) any later version.
24 */ 24 */
25 25#include <linux/cache.h>
26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/module.h> 26#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/string.h>
32#include <linux/socket.h>
33#include <linux/in.h>
34#include <linux/inet.h>
35#include <linux/netdevice.h> 27#include <linux/netdevice.h>
36#include <linux/timer.h> 28#include <linux/spinlock.h>
37#include <net/ip.h>
38#include <net/protocol.h> 29#include <net/protocol.h>
39#include <linux/skbuff.h>
40#include <net/sock.h>
41#include <net/icmp.h>
42#include <net/udp.h>
43#include <net/ipip.h>
44#include <linux/igmp.h>
45 30
46struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp; 31struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
47static DEFINE_SPINLOCK(inet_proto_lock); 32static DEFINE_SPINLOCK(inet_proto_lock);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2979f14bb188..ebb1e5848bc6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -375,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
375 err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, 375 err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
376 dst_output); 376 dst_output);
377 if (err > 0) 377 if (err > 0)
378 err = inet->recverr ? net_xmit_errno(err) : 0; 378 err = net_xmit_errno(err);
379 if (err) 379 if (err)
380 goto error; 380 goto error;
381out: 381out:
@@ -386,6 +386,8 @@ error_fault:
386 kfree_skb(skb); 386 kfree_skb(skb);
387error: 387error:
388 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); 388 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
389 if (err == -ENOBUFS && !inet->recverr)
390 err = 0;
389 return err; 391 return err;
390} 392}
391 393
@@ -576,8 +578,11 @@ back_from_confirm:
576 &ipc, &rt, msg->msg_flags); 578 &ipc, &rt, msg->msg_flags);
577 if (err) 579 if (err)
578 ip_flush_pending_frames(sk); 580 ip_flush_pending_frames(sk);
579 else if (!(msg->msg_flags & MSG_MORE)) 581 else if (!(msg->msg_flags & MSG_MORE)) {
580 err = ip_push_pending_frames(sk); 582 err = ip_push_pending_frames(sk);
583 if (err == -ENOBUFS && !inet->recverr)
584 err = 0;
585 }
581 release_sock(sk); 586 release_sock(sk);
582 } 587 }
583done: 588done:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fafbe163e2b5..91867d3e6328 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1514,13 +1514,17 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1514void ip_rt_send_redirect(struct sk_buff *skb) 1514void ip_rt_send_redirect(struct sk_buff *skb)
1515{ 1515{
1516 struct rtable *rt = skb_rtable(skb); 1516 struct rtable *rt = skb_rtable(skb);
1517 struct in_device *in_dev = in_dev_get(rt->u.dst.dev); 1517 struct in_device *in_dev;
1518 int log_martians;
1518 1519
1519 if (!in_dev) 1520 rcu_read_lock();
1521 in_dev = __in_dev_get_rcu(rt->u.dst.dev);
1522 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1523 rcu_read_unlock();
1520 return; 1524 return;
1521 1525 }
1522 if (!IN_DEV_TX_REDIRECTS(in_dev)) 1526 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1523 goto out; 1527 rcu_read_unlock();
1524 1528
1525 /* No redirected packets during ip_rt_redirect_silence; 1529 /* No redirected packets during ip_rt_redirect_silence;
1526 * reset the algorithm. 1530 * reset the algorithm.
@@ -1533,7 +1537,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1533 */ 1537 */
1534 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { 1538 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1535 rt->u.dst.rate_last = jiffies; 1539 rt->u.dst.rate_last = jiffies;
1536 goto out; 1540 return;
1537 } 1541 }
1538 1542
1539 /* Check for load limit; set rate_last to the latest sent 1543 /* Check for load limit; set rate_last to the latest sent
@@ -1547,7 +1551,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1547 rt->u.dst.rate_last = jiffies; 1551 rt->u.dst.rate_last = jiffies;
1548 ++rt->u.dst.rate_tokens; 1552 ++rt->u.dst.rate_tokens;
1549#ifdef CONFIG_IP_ROUTE_VERBOSE 1553#ifdef CONFIG_IP_ROUTE_VERBOSE
1550 if (IN_DEV_LOG_MARTIANS(in_dev) && 1554 if (log_martians &&
1551 rt->u.dst.rate_tokens == ip_rt_redirect_number && 1555 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1552 net_ratelimit()) 1556 net_ratelimit())
1553 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1557 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
@@ -1555,8 +1559,6 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1555 &rt->rt_dst, &rt->rt_gateway); 1559 &rt->rt_dst, &rt->rt_gateway);
1556#endif 1560#endif
1557 } 1561 }
1558out:
1559 in_dev_put(in_dev);
1560} 1562}
1561 1563
1562static int ip_error(struct sk_buff *skb) 1564static int ip_error(struct sk_buff *skb)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 91145244ea63..edeea060db44 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1839,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout)
1839 /* Unread data was tossed, zap the connection. */ 1839 /* Unread data was tossed, zap the connection. */
1840 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 1840 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1841 tcp_set_state(sk, TCP_CLOSE); 1841 tcp_set_state(sk, TCP_CLOSE);
1842 tcp_send_active_reset(sk, GFP_KERNEL); 1842 tcp_send_active_reset(sk, sk->sk_allocation);
1843 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 1843 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1844 /* Check zero linger _after_ checking for unread data. */ 1844 /* Check zero linger _after_ checking for unread data. */
1845 sk->sk_prot->disconnect(sk, 0); 1845 sk->sk_prot->disconnect(sk, 0);
@@ -2336,13 +2336,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2336 val = !!(tp->nonagle&TCP_NAGLE_CORK); 2336 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2337 break; 2337 break;
2338 case TCP_KEEPIDLE: 2338 case TCP_KEEPIDLE:
2339 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ; 2339 val = keepalive_time_when(tp) / HZ;
2340 break; 2340 break;
2341 case TCP_KEEPINTVL: 2341 case TCP_KEEPINTVL:
2342 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ; 2342 val = keepalive_intvl_when(tp) / HZ;
2343 break; 2343 break;
2344 case TCP_KEEPCNT: 2344 case TCP_KEEPCNT:
2345 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; 2345 val = keepalive_probes(tp);
2346 break; 2346 break;
2347 case TCP_SYNCNT: 2347 case TCP_SYNCNT:
2348 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 2348 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
@@ -2658,7 +2658,7 @@ void tcp_free_md5sig_pool(void)
2658 2658
2659EXPORT_SYMBOL(tcp_free_md5sig_pool); 2659EXPORT_SYMBOL(tcp_free_md5sig_pool);
2660 2660
2661static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) 2661static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
2662{ 2662{
2663 int cpu; 2663 int cpu;
2664 struct tcp_md5sig_pool **pool; 2664 struct tcp_md5sig_pool **pool;
@@ -2671,7 +2671,7 @@ static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2671 struct tcp_md5sig_pool *p; 2671 struct tcp_md5sig_pool *p;
2672 struct crypto_hash *hash; 2672 struct crypto_hash *hash;
2673 2673
2674 p = kzalloc(sizeof(*p), GFP_KERNEL); 2674 p = kzalloc(sizeof(*p), sk->sk_allocation);
2675 if (!p) 2675 if (!p)
2676 goto out_free; 2676 goto out_free;
2677 *per_cpu_ptr(pool, cpu) = p; 2677 *per_cpu_ptr(pool, cpu) = p;
@@ -2688,7 +2688,7 @@ out_free:
2688 return NULL; 2688 return NULL;
2689} 2689}
2690 2690
2691struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) 2691struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
2692{ 2692{
2693 struct tcp_md5sig_pool **pool; 2693 struct tcp_md5sig_pool **pool;
2694 int alloc = 0; 2694 int alloc = 0;
@@ -2709,7 +2709,7 @@ retry:
2709 2709
2710 if (alloc) { 2710 if (alloc) {
2711 /* we cannot hold spinlock here because this may sleep. */ 2711 /* we cannot hold spinlock here because this may sleep. */
2712 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); 2712 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
2713 spin_lock_bh(&tcp_md5sig_pool_lock); 2713 spin_lock_bh(&tcp_md5sig_pool_lock);
2714 if (!p) { 2714 if (!p) {
2715 tcp_md5sig_users--; 2715 tcp_md5sig_users--;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bdb0da237e6..af6d6fa00db1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -685,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk)
685 * is invisible. Actually, Linux-2.4 also generates erratic 685 * is invisible. Actually, Linux-2.4 also generates erratic
686 * ACKs in some circumstances. 686 * ACKs in some circumstances.
687 */ 687 */
688 inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar; 688 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
689 689
690 /* 2. Fixups made earlier cannot be right. 690 /* 2. Fixups made earlier cannot be right.
691 * If we do not estimate RTO correctly without them, 691 * If we do not estimate RTO correctly without them,
@@ -696,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk)
696 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo 696 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
697 * guarantees that rto is higher. 697 * guarantees that rto is higher.
698 */ 698 */
699 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) 699 tcp_bound_rto(sk);
700 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
701} 700}
702 701
703/* Save metrics learned by this TCP session. 702/* Save metrics learned by this TCP session.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6d88219c5e22..0543561da999 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -328,26 +328,29 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
328 * 328 *
329 */ 329 */
330 330
331void tcp_v4_err(struct sk_buff *skb, u32 info) 331void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
332{ 332{
333 struct iphdr *iph = (struct iphdr *)skb->data; 333 struct iphdr *iph = (struct iphdr *)icmp_skb->data;
334 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); 334 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
335 struct inet_connection_sock *icsk;
335 struct tcp_sock *tp; 336 struct tcp_sock *tp;
336 struct inet_sock *inet; 337 struct inet_sock *inet;
337 const int type = icmp_hdr(skb)->type; 338 const int type = icmp_hdr(icmp_skb)->type;
338 const int code = icmp_hdr(skb)->code; 339 const int code = icmp_hdr(icmp_skb)->code;
339 struct sock *sk; 340 struct sock *sk;
341 struct sk_buff *skb;
340 __u32 seq; 342 __u32 seq;
343 __u32 remaining;
341 int err; 344 int err;
342 struct net *net = dev_net(skb->dev); 345 struct net *net = dev_net(icmp_skb->dev);
343 346
344 if (skb->len < (iph->ihl << 2) + 8) { 347 if (icmp_skb->len < (iph->ihl << 2) + 8) {
345 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); 348 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
346 return; 349 return;
347 } 350 }
348 351
349 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, 352 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(skb)); 353 iph->saddr, th->source, inet_iif(icmp_skb));
351 if (!sk) { 354 if (!sk) {
352 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); 355 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
353 return; 356 return;
@@ -367,6 +370,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
367 if (sk->sk_state == TCP_CLOSE) 370 if (sk->sk_state == TCP_CLOSE)
368 goto out; 371 goto out;
369 372
373 icsk = inet_csk(sk);
370 tp = tcp_sk(sk); 374 tp = tcp_sk(sk);
371 seq = ntohl(th->seq); 375 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN && 376 if (sk->sk_state != TCP_LISTEN &&
@@ -393,6 +397,39 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
393 } 397 }
394 398
395 err = icmp_err_convert[code].errno; 399 err = icmp_err_convert[code].errno;
400 /* check if icmp_skb allows revert of backoff
401 * (see draft-zimmermann-tcp-lcd) */
402 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
403 break;
404 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
405 !icsk->icsk_backoff)
406 break;
407
408 icsk->icsk_backoff--;
409 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
410 icsk->icsk_backoff;
411 tcp_bound_rto(sk);
412
413 skb = tcp_write_queue_head(sk);
414 BUG_ON(!skb);
415
416 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
417 tcp_time_stamp - TCP_SKB_CB(skb)->when);
418
419 if (remaining) {
420 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
421 remaining, TCP_RTO_MAX);
422 } else if (sock_owned_by_user(sk)) {
423 /* RTO revert clocked out retransmission,
424 * but socket is locked. Will defer. */
425 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
426 HZ/20, TCP_RTO_MAX);
427 } else {
428 /* RTO revert clocked out retransmission.
429 * Will retransmit now */
430 tcp_retransmit_timer(sk);
431 }
432
396 break; 433 break;
397 case ICMP_TIME_EXCEEDED: 434 case ICMP_TIME_EXCEEDED:
398 err = EHOSTUNREACH; 435 err = EHOSTUNREACH;
@@ -849,7 +886,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
849 } 886 }
850 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 887 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
851 } 888 }
852 if (tcp_alloc_md5sig_pool() == NULL) { 889 if (tcp_alloc_md5sig_pool(sk) == NULL) {
853 kfree(newkey); 890 kfree(newkey);
854 return -ENOMEM; 891 return -ENOMEM;
855 } 892 }
@@ -970,8 +1007,9 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
970 1007
971 if (!tcp_sk(sk)->md5sig_info) { 1008 if (!tcp_sk(sk)->md5sig_info) {
972 struct tcp_sock *tp = tcp_sk(sk); 1009 struct tcp_sock *tp = tcp_sk(sk);
973 struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL); 1010 struct tcp_md5sig_info *p;
974 1011
1012 p = kzalloc(sizeof(*p), sk->sk_allocation);
975 if (!p) 1013 if (!p)
976 return -EINVAL; 1014 return -EINVAL;
977 1015
@@ -979,7 +1017,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
979 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 1017 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
980 } 1018 }
981 1019
982 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 1020 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
983 if (!newkey) 1021 if (!newkey)
984 return -ENOMEM; 1022 return -ENOMEM;
985 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, 1023 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
@@ -1158,7 +1196,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1158}; 1196};
1159 1197
1160#ifdef CONFIG_TCP_MD5SIG 1198#ifdef CONFIG_TCP_MD5SIG
1161static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { 1199static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1162 .md5_lookup = tcp_v4_reqsk_md5_lookup, 1200 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1163 .calc_md5_hash = tcp_v4_md5_hash_skb, 1201 .calc_md5_hash = tcp_v4_md5_hash_skb,
1164}; 1202};
@@ -1717,7 +1755,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1717 return 0; 1755 return 0;
1718} 1756}
1719 1757
1720struct inet_connection_sock_af_ops ipv4_specific = { 1758const struct inet_connection_sock_af_ops ipv4_specific = {
1721 .queue_xmit = ip_queue_xmit, 1759 .queue_xmit = ip_queue_xmit,
1722 .send_check = tcp_v4_send_check, 1760 .send_check = tcp_v4_send_check,
1723 .rebuild_header = inet_sk_rebuild_header, 1761 .rebuild_header = inet_sk_rebuild_header,
@@ -1737,7 +1775,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
1737}; 1775};
1738 1776
1739#ifdef CONFIG_TCP_MD5SIG 1777#ifdef CONFIG_TCP_MD5SIG
1740static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1778static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1741 .md5_lookup = tcp_v4_md5_lookup, 1779 .md5_lookup = tcp_v4_md5_lookup,
1742 .calc_md5_hash = tcp_v4_md5_hash_skb, 1780 .calc_md5_hash = tcp_v4_md5_hash_skb,
1743 .md5_add = tcp_v4_md5_add_func, 1781 .md5_add = tcp_v4_md5_add_func,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f8d67ccc64f3..e48c37d74d77 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -322,7 +322,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
322 if (key != NULL) { 322 if (key != NULL) {
323 memcpy(&tcptw->tw_md5_key, key->key, key->keylen); 323 memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
324 tcptw->tw_md5_keylen = key->keylen; 324 tcptw->tw_md5_keylen = key->keylen;
325 if (tcp_alloc_md5sig_pool() == NULL) 325 if (tcp_alloc_md5sig_pool(sk) == NULL)
326 BUG(); 326 BUG();
327 } 327 }
328 } while (0); 328 } while (0);
@@ -657,29 +657,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
657 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); 657 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
658 if (child == NULL) 658 if (child == NULL)
659 goto listen_overflow; 659 goto listen_overflow;
660#ifdef CONFIG_TCP_MD5SIG
661 else {
662 /* Copy over the MD5 key from the original socket */
663 struct tcp_md5sig_key *key;
664 struct tcp_sock *tp = tcp_sk(sk);
665 key = tp->af_specific->md5_lookup(sk, child);
666 if (key != NULL) {
667 /*
668 * We're using one, so create a matching key on the
669 * newsk structure. If we fail to get memory then we
670 * end up not copying the key across. Shucks.
671 */
672 char *newkey = kmemdup(key->key, key->keylen,
673 GFP_ATOMIC);
674 if (newkey) {
675 if (!tcp_alloc_md5sig_pool())
676 BUG();
677 tp->af_specific->md5_add(child, child, newkey,
678 key->keylen);
679 }
680 }
681 }
682#endif
683 660
684 inet_csk_reqsk_queue_unlink(sk, req, prev); 661 inet_csk_reqsk_queue_unlink(sk, req, prev);
685 inet_csk_reqsk_queue_removed(sk, req); 662 inet_csk_reqsk_queue_removed(sk, req);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4e004424d400..5200aab0ca97 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2135,7 +2135,8 @@ void tcp_send_fin(struct sock *sk)
2135 } else { 2135 } else {
2136 /* Socket is locked, keep trying until memory is available. */ 2136 /* Socket is locked, keep trying until memory is available. */
2137 for (;;) { 2137 for (;;) {
2138 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 2138 skb = alloc_skb_fclone(MAX_TCP_HEADER,
2139 sk->sk_allocation);
2139 if (skb) 2140 if (skb)
2140 break; 2141 break;
2141 yield(); 2142 yield();
@@ -2388,7 +2389,7 @@ int tcp_connect(struct sock *sk)
2388 sk->sk_wmem_queued += buff->truesize; 2389 sk->sk_wmem_queued += buff->truesize;
2389 sk_mem_charge(sk, buff->truesize); 2390 sk_mem_charge(sk, buff->truesize);
2390 tp->packets_out += tcp_skb_pcount(buff); 2391 tp->packets_out += tcp_skb_pcount(buff);
2391 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2392 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2392 2393
2393 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2394 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2394 * in order to make this packet get counted in tcpOutSegs. 2395 * in order to make this packet get counted in tcpOutSegs.
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b144a26359bc..cdb2ca7684d4 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -137,13 +137,14 @@ static int tcp_write_timeout(struct sock *sk)
137{ 137{
138 struct inet_connection_sock *icsk = inet_csk(sk); 138 struct inet_connection_sock *icsk = inet_csk(sk);
139 int retry_until; 139 int retry_until;
140 bool do_reset;
140 141
141 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 142 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
142 if (icsk->icsk_retransmits) 143 if (icsk->icsk_retransmits)
143 dst_negative_advice(&sk->sk_dst_cache); 144 dst_negative_advice(&sk->sk_dst_cache);
144 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 145 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
145 } else { 146 } else {
146 if (icsk->icsk_retransmits >= sysctl_tcp_retries1) { 147 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
147 /* Black hole detection */ 148 /* Black hole detection */
148 tcp_mtu_probing(icsk, sk); 149 tcp_mtu_probing(icsk, sk);
149 150
@@ -155,13 +156,15 @@ static int tcp_write_timeout(struct sock *sk)
155 const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 156 const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
156 157
157 retry_until = tcp_orphan_retries(sk, alive); 158 retry_until = tcp_orphan_retries(sk, alive);
159 do_reset = alive ||
160 !retransmits_timed_out(sk, retry_until);
158 161
159 if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) 162 if (tcp_out_of_resources(sk, do_reset))
160 return 1; 163 return 1;
161 } 164 }
162 } 165 }
163 166
164 if (icsk->icsk_retransmits >= retry_until) { 167 if (retransmits_timed_out(sk, retry_until)) {
165 /* Has it gone just too far? */ 168 /* Has it gone just too far? */
166 tcp_write_err(sk); 169 tcp_write_err(sk);
167 return 1; 170 return 1;
@@ -279,7 +282,7 @@ static void tcp_probe_timer(struct sock *sk)
279 * The TCP retransmit timer. 282 * The TCP retransmit timer.
280 */ 283 */
281 284
282static void tcp_retransmit_timer(struct sock *sk) 285void tcp_retransmit_timer(struct sock *sk)
283{ 286{
284 struct tcp_sock *tp = tcp_sk(sk); 287 struct tcp_sock *tp = tcp_sk(sk);
285 struct inet_connection_sock *icsk = inet_csk(sk); 288 struct inet_connection_sock *icsk = inet_csk(sk);
@@ -385,7 +388,7 @@ static void tcp_retransmit_timer(struct sock *sk)
385out_reset_timer: 388out_reset_timer:
386 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 389 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
387 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 390 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
388 if (icsk->icsk_retransmits > sysctl_tcp_retries1) 391 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
389 __sk_dst_reset(sk); 392 __sk_dst_reset(sk);
390 393
391out:; 394out:;
@@ -499,8 +502,7 @@ static void tcp_keepalive_timer (unsigned long data)
499 elapsed = tcp_time_stamp - tp->rcv_tstamp; 502 elapsed = tcp_time_stamp - tp->rcv_tstamp;
500 503
501 if (elapsed >= keepalive_time_when(tp)) { 504 if (elapsed >= keepalive_time_when(tp)) {
502 if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) || 505 if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
503 (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
504 tcp_send_active_reset(sk, GFP_ATOMIC); 506 tcp_send_active_reset(sk, GFP_ATOMIC);
505 tcp_write_err(sk); 507 tcp_write_err(sk);
506 goto out; 508 goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 29ebb0d27a1e..ebaaa7f973d7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -561,12 +561,18 @@ static int udp_push_pending_frames(struct sock *sk)
561 561
562send: 562send:
563 err = ip_push_pending_frames(sk); 563 err = ip_push_pending_frames(sk);
564 if (err) {
565 if (err == -ENOBUFS && !inet->recverr) {
566 UDP_INC_STATS_USER(sock_net(sk),
567 UDP_MIB_SNDBUFERRORS, is_udplite);
568 err = 0;
569 }
570 } else
571 UDP_INC_STATS_USER(sock_net(sk),
572 UDP_MIB_OUTDATAGRAMS, is_udplite);
564out: 573out:
565 up->len = 0; 574 up->len = 0;
566 up->pending = 0; 575 up->pending = 0;
567 if (!err)
568 UDP_INC_STATS_USER(sock_net(sk),
569 UDP_MIB_OUTDATAGRAMS, is_udplite);
570 return err; 576 return err;
571} 577}
572 578
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bf85d5f97032..a123a328aeb3 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -306,8 +306,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
306 v4addr != htonl(INADDR_ANY) && 306 v4addr != htonl(INADDR_ANY) &&
307 chk_addr_ret != RTN_LOCAL && 307 chk_addr_ret != RTN_LOCAL &&
308 chk_addr_ret != RTN_MULTICAST && 308 chk_addr_ret != RTN_MULTICAST &&
309 chk_addr_ret != RTN_BROADCAST) 309 chk_addr_ret != RTN_BROADCAST) {
310 err = -EADDRNOTAVAIL;
310 goto out; 311 goto out;
312 }
311 } else { 313 } else {
312 if (addr_type != IPV6_ADDR_ANY) { 314 if (addr_type != IPV6_ADDR_ANY) {
313 struct net_device *dev = NULL; 315 struct net_device *dev = NULL;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index eab62a7a8f06..e2325f6a05fb 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -323,7 +323,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
323 int iif = 0; 323 int iif = 0;
324 int addr_type = 0; 324 int addr_type = 0;
325 int len; 325 int len;
326 int hlimit, tclass; 326 int hlimit;
327 int err = 0; 327 int err = 0;
328 328
329 if ((u8 *)hdr < skb->head || 329 if ((u8 *)hdr < skb->head ||
@@ -469,10 +469,6 @@ route_done:
469 if (hlimit < 0) 469 if (hlimit < 0)
470 hlimit = ip6_dst_hoplimit(dst); 470 hlimit = ip6_dst_hoplimit(dst);
471 471
472 tclass = np->tclass;
473 if (tclass < 0)
474 tclass = 0;
475
476 msg.skb = skb; 472 msg.skb = skb;
477 msg.offset = skb_network_offset(skb); 473 msg.offset = skb_network_offset(skb);
478 msg.type = type; 474 msg.type = type;
@@ -488,8 +484,8 @@ route_done:
488 484
489 err = ip6_append_data(sk, icmpv6_getfrag, &msg, 485 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
490 len + sizeof(struct icmp6hdr), 486 len + sizeof(struct icmp6hdr),
491 sizeof(struct icmp6hdr), 487 sizeof(struct icmp6hdr), hlimit,
492 hlimit, tclass, NULL, &fl, (struct rt6_info*)dst, 488 np->tclass, NULL, &fl, (struct rt6_info*)dst,
493 MSG_DONTWAIT); 489 MSG_DONTWAIT);
494 if (err) { 490 if (err) {
495 ip6_flush_pending_frames(sk); 491 ip6_flush_pending_frames(sk);
@@ -522,7 +518,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
522 struct dst_entry *dst; 518 struct dst_entry *dst;
523 int err = 0; 519 int err = 0;
524 int hlimit; 520 int hlimit;
525 int tclass;
526 521
527 saddr = &ipv6_hdr(skb)->daddr; 522 saddr = &ipv6_hdr(skb)->daddr;
528 523
@@ -562,10 +557,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
562 if (hlimit < 0) 557 if (hlimit < 0)
563 hlimit = ip6_dst_hoplimit(dst); 558 hlimit = ip6_dst_hoplimit(dst);
564 559
565 tclass = np->tclass;
566 if (tclass < 0)
567 tclass = 0;
568
569 idev = in6_dev_get(skb->dev); 560 idev = in6_dev_get(skb->dev);
570 561
571 msg.skb = skb; 562 msg.skb = skb;
@@ -573,7 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
573 msg.type = ICMPV6_ECHO_REPLY; 564 msg.type = ICMPV6_ECHO_REPLY;
574 565
575 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 566 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
576 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, 567 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
577 (struct rt6_info*)dst, MSG_DONTWAIT); 568 (struct rt6_info*)dst, MSG_DONTWAIT);
578 569
579 if (err) { 570 if (err) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 93beee944657..cd48801a8d6f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -194,7 +194,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
194 struct ipv6hdr *hdr; 194 struct ipv6hdr *hdr;
195 u8 proto = fl->proto; 195 u8 proto = fl->proto;
196 int seg_len = skb->len; 196 int seg_len = skb->len;
197 int hlimit, tclass; 197 int hlimit = -1;
198 int tclass = 0;
198 u32 mtu; 199 u32 mtu;
199 200
200 if (opt) { 201 if (opt) {
@@ -237,19 +238,13 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
237 /* 238 /*
238 * Fill in the IPv6 header 239 * Fill in the IPv6 header
239 */ 240 */
240 241 if (np) {
241 hlimit = -1; 242 tclass = np->tclass;
242 if (np)
243 hlimit = np->hop_limit; 243 hlimit = np->hop_limit;
244 }
244 if (hlimit < 0) 245 if (hlimit < 0)
245 hlimit = ip6_dst_hoplimit(dst); 246 hlimit = ip6_dst_hoplimit(dst);
246 247
247 tclass = -1;
248 if (np)
249 tclass = np->tclass;
250 if (tclass < 0)
251 tclass = 0;
252
253 *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; 248 *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
254 249
255 hdr->payload_len = htons(seg_len); 250 hdr->payload_len = htons(seg_len);
@@ -1516,7 +1511,7 @@ int ip6_push_pending_frames(struct sock *sk)
1516 err = ip6_local_out(skb); 1511 err = ip6_local_out(skb);
1517 if (err) { 1512 if (err) {
1518 if (err > 0) 1513 if (err > 0)
1519 err = np->recverr ? net_xmit_errno(err) : 0; 1514 err = net_xmit_errno(err);
1520 if (err) 1515 if (err)
1521 goto error; 1516 goto error;
1522 } 1517 }
@@ -1525,6 +1520,7 @@ out:
1525 ip6_cork_release(inet, np); 1520 ip6_cork_release(inet, np);
1526 return err; 1521 return err;
1527error: 1522error:
1523 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1528 goto out; 1524 goto out;
1529} 1525}
1530 1526
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a1d6045c4694..7d25bbe32110 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1036,7 +1036,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1036 return 0; 1036 return 0;
1037} 1037}
1038 1038
1039static int 1039static netdev_tx_t
1040ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1040ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1041{ 1041{
1042 struct ip6_tnl *t = netdev_priv(dev); 1042 struct ip6_tnl *t = netdev_priv(dev);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 07ded5075b33..5c8d73730c75 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -204,7 +204,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
204 return 0; 204 return 0;
205} 205}
206 206
207static struct seq_operations ip6mr_vif_seq_ops = { 207static const struct seq_operations ip6mr_vif_seq_ops = {
208 .start = ip6mr_vif_seq_start, 208 .start = ip6mr_vif_seq_start,
209 .next = ip6mr_vif_seq_next, 209 .next = ip6mr_vif_seq_next,
210 .stop = ip6mr_vif_seq_stop, 210 .stop = ip6mr_vif_seq_stop,
@@ -217,7 +217,7 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file)
217 sizeof(struct ipmr_vif_iter)); 217 sizeof(struct ipmr_vif_iter));
218} 218}
219 219
220static struct file_operations ip6mr_vif_fops = { 220static const struct file_operations ip6mr_vif_fops = {
221 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
222 .open = ip6mr_vif_open, 222 .open = ip6mr_vif_open,
223 .read = seq_read, 223 .read = seq_read,
@@ -341,7 +341,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
341 sizeof(struct ipmr_mfc_iter)); 341 sizeof(struct ipmr_mfc_iter));
342} 342}
343 343
344static struct file_operations ip6mr_mfc_fops = { 344static const struct file_operations ip6mr_mfc_fops = {
345 .owner = THIS_MODULE, 345 .owner = THIS_MODULE,
346 .open = ipmr_mfc_open, 346 .open = ipmr_mfc_open,
347 .read = seq_read, 347 .read = seq_read,
@@ -416,7 +416,8 @@ static struct inet6_protocol pim6_protocol = {
416 416
417/* Service routines creating virtual interfaces: PIMREG */ 417/* Service routines creating virtual interfaces: PIMREG */
418 418
419static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 419static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
420 struct net_device *dev)
420{ 421{
421 struct net *net = dev_net(dev); 422 struct net *net = dev_net(dev);
422 423
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index a7fdf9a27f15..f5e0682b402d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -315,6 +315,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
315 goto e_inval; 315 goto e_inval;
316 if (val < -1 || val > 0xff) 316 if (val < -1 || val > 0xff)
317 goto e_inval; 317 goto e_inval;
318 /* RFC 3542, 6.5: default traffic class of 0x0 */
319 if (val == -1)
320 val = 0;
318 np->tclass = val; 321 np->tclass = val;
319 retv = 0; 322 retv = 0;
320 break; 323 break;
@@ -1037,8 +1040,6 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1037 1040
1038 case IPV6_TCLASS: 1041 case IPV6_TCLASS:
1039 val = np->tclass; 1042 val = np->tclass;
1040 if (val < 0)
1041 val = 0;
1042 break; 1043 break;
1043 1044
1044 case IPV6_RECVTCLASS: 1045 case IPV6_RECVTCLASS:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9eb68e92cc18..7015478797f6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -98,7 +98,7 @@ static int pndisc_constructor(struct pneigh_entry *n);
98static void pndisc_destructor(struct pneigh_entry *n); 98static void pndisc_destructor(struct pneigh_entry *n);
99static void pndisc_redo(struct sk_buff *skb); 99static void pndisc_redo(struct sk_buff *skb);
100 100
101static struct neigh_ops ndisc_generic_ops = { 101static const struct neigh_ops ndisc_generic_ops = {
102 .family = AF_INET6, 102 .family = AF_INET6,
103 .solicit = ndisc_solicit, 103 .solicit = ndisc_solicit,
104 .error_report = ndisc_error_report, 104 .error_report = ndisc_error_report,
@@ -108,7 +108,7 @@ static struct neigh_ops ndisc_generic_ops = {
108 .queue_xmit = dev_queue_xmit, 108 .queue_xmit = dev_queue_xmit,
109}; 109};
110 110
111static struct neigh_ops ndisc_hh_ops = { 111static const struct neigh_ops ndisc_hh_ops = {
112 .family = AF_INET6, 112 .family = AF_INET6,
113 .solicit = ndisc_solicit, 113 .solicit = ndisc_solicit,
114 .error_report = ndisc_error_report, 114 .error_report = ndisc_error_report,
@@ -119,7 +119,7 @@ static struct neigh_ops ndisc_hh_ops = {
119}; 119};
120 120
121 121
122static struct neigh_ops ndisc_direct_ops = { 122static const struct neigh_ops ndisc_direct_ops = {
123 .family = AF_INET6, 123 .family = AF_INET6,
124 .output = dev_queue_xmit, 124 .output = dev_queue_xmit,
125 .connected_output = dev_queue_xmit, 125 .connected_output = dev_queue_xmit,
@@ -955,8 +955,8 @@ static void ndisc_recv_na(struct sk_buff *skb)
955 */ 955 */
956 if (skb->pkt_type != PACKET_LOOPBACK) 956 if (skb->pkt_type != PACKET_LOOPBACK)
957 ND_PRINTK1(KERN_WARNING 957 ND_PRINTK1(KERN_WARNING
958 "ICMPv6 NA: someone advertises our address on %s!\n", 958 "ICMPv6 NA: someone advertises our address %pI6 on %s!\n",
959 ifp->idev->dev->name); 959 &ifp->addr, ifp->idev->dev->name);
960 in6_ifa_put(ifp); 960 in6_ifa_put(ifp);
961 return; 961 return;
962 } 962 }
@@ -1151,10 +1151,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1151 skb->dev->name); 1151 skb->dev->name);
1152 return; 1152 return;
1153 } 1153 }
1154 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) {
1155 in6_dev_put(in6_dev);
1156 return;
1157 }
1158 1154
1159 if (!ndisc_parse_options(opt, optlen, &ndopts)) { 1155 if (!ndisc_parse_options(opt, optlen, &ndopts)) {
1160 in6_dev_put(in6_dev); 1156 in6_dev_put(in6_dev);
@@ -1163,6 +1159,10 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1163 return; 1159 return;
1164 } 1160 }
1165 1161
1162 /* skip route and link configuration on routers */
1163 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1164 goto skip_linkparms;
1165
1166#ifdef CONFIG_IPV6_NDISC_NODETYPE 1166#ifdef CONFIG_IPV6_NDISC_NODETYPE
1167 /* skip link-specific parameters from interior routers */ 1167 /* skip link-specific parameters from interior routers */
1168 if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) 1168 if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
@@ -1283,9 +1283,7 @@ skip_defrtr:
1283 } 1283 }
1284 } 1284 }
1285 1285
1286#ifdef CONFIG_IPV6_NDISC_NODETYPE
1287skip_linkparms: 1286skip_linkparms:
1288#endif
1289 1287
1290 /* 1288 /*
1291 * Process options. 1289 * Process options.
@@ -1312,6 +1310,10 @@ skip_linkparms:
1312 NEIGH_UPDATE_F_ISROUTER); 1310 NEIGH_UPDATE_F_ISROUTER);
1313 } 1311 }
1314 1312
1313 /* skip route and link configuration on routers */
1314 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1315 goto out;
1316
1315#ifdef CONFIG_IPV6_ROUTE_INFO 1317#ifdef CONFIG_IPV6_ROUTE_INFO
1316 if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) { 1318 if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
1317 struct nd_opt_hdr *p; 1319 struct nd_opt_hdr *p;
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9ab789159913..568864f722ca 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -20,20 +20,9 @@
20 * - Removed unused variable 'inet6_protocol_base' 20 * - Removed unused variable 'inet6_protocol_base'
21 * - Modified inet6_del_protocol() to correctly maintain copy bit. 21 * - Modified inet6_del_protocol() to correctly maintain copy bit.
22 */ 22 */
23 23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/in6.h>
30#include <linux/netdevice.h> 24#include <linux/netdevice.h>
31#include <linux/if_arp.h> 25#include <linux/spinlock.h>
32
33#include <net/sock.h>
34#include <net/snmp.h>
35
36#include <net/ipv6.h>
37#include <net/protocol.h> 26#include <net/protocol.h>
38 27
39struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; 28struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d6c3c1c34b2d..7d675b8d82d3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -642,7 +642,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, 642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
643 dst_output); 643 dst_output);
644 if (err > 0) 644 if (err > 0)
645 err = np->recverr ? net_xmit_errno(err) : 0; 645 err = net_xmit_errno(err);
646 if (err) 646 if (err)
647 goto error; 647 goto error;
648out: 648out:
@@ -653,6 +653,8 @@ error_fault:
653 kfree_skb(skb); 653 kfree_skb(skb);
654error: 654error:
655 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 655 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
656 if (err == -ENOBUFS && !np->recverr)
657 err = 0;
656 return err; 658 return err;
657} 659}
658 660
@@ -877,11 +879,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
877 hlimit = ip6_dst_hoplimit(dst); 879 hlimit = ip6_dst_hoplimit(dst);
878 } 880 }
879 881
880 if (tclass < 0) { 882 if (tclass < 0)
881 tclass = np->tclass; 883 tclass = np->tclass;
882 if (tclass < 0)
883 tclass = 0;
884 }
885 884
886 if (msg->msg_flags&MSG_CONFIRM) 885 if (msg->msg_flags&MSG_CONFIRM)
887 goto do_confirm; 886 goto do_confirm;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1473ee0a1f51..9ccfef345560 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -665,7 +665,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
665 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1; 665 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
666 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0; 666 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
667 667
668 ip6_dst_gc(net->ipv6.ip6_dst_ops); 668 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
669 669
670 net->ipv6.sysctl.ip6_rt_gc_elasticity = 670 net->ipv6.sysctl.ip6_rt_gc_elasticity =
671 saved_rt_elasticity; 671 saved_rt_elasticity;
@@ -970,7 +970,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
970 if (unlikely(idev == NULL)) 970 if (unlikely(idev == NULL))
971 return NULL; 971 return NULL;
972 972
973 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 973 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
974 if (unlikely(rt == NULL)) { 974 if (unlikely(rt == NULL)) {
975 in6_dev_put(idev); 975 in6_dev_put(idev);
976 goto out; 976 goto out;
@@ -1060,7 +1060,7 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1060static int ip6_dst_gc(struct dst_ops *ops) 1060static int ip6_dst_gc(struct dst_ops *ops)
1061{ 1061{
1062 unsigned long now = jiffies; 1062 unsigned long now = jiffies;
1063 struct net *net = ops->dst_net; 1063 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1064 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; 1064 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1065 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size; 1065 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1066 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; 1066 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
@@ -1154,7 +1154,7 @@ int ip6_route_add(struct fib6_config *cfg)
1154 goto out; 1154 goto out;
1155 } 1155 }
1156 1156
1157 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 1157 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1158 1158
1159 if (rt == NULL) { 1159 if (rt == NULL) {
1160 err = -ENOMEM; 1160 err = -ENOMEM;
@@ -1643,7 +1643,7 @@ out:
1643static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1643static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1644{ 1644{
1645 struct net *net = dev_net(ort->rt6i_dev); 1645 struct net *net = dev_net(ort->rt6i_dev);
1646 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 1646 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1647 1647
1648 if (rt) { 1648 if (rt) {
1649 rt->u.dst.input = ort->u.dst.input; 1649 rt->u.dst.input = ort->u.dst.input;
@@ -1923,7 +1923,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1923 int anycast) 1923 int anycast)
1924{ 1924{
1925 struct net *net = dev_net(idev->dev); 1925 struct net *net = dev_net(idev->dev);
1926 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); 1926 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1927 struct neighbour *neigh; 1927 struct neighbour *neigh;
1928 1928
1929 if (rt == NULL) 1929 if (rt == NULL)
@@ -2501,7 +2501,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2501 net->ipv6.rt6_stats->fib_rt_alloc, 2501 net->ipv6.rt6_stats->fib_rt_alloc,
2502 net->ipv6.rt6_stats->fib_rt_entries, 2502 net->ipv6.rt6_stats->fib_rt_entries,
2503 net->ipv6.rt6_stats->fib_rt_cache, 2503 net->ipv6.rt6_stats->fib_rt_cache,
2504 atomic_read(&net->ipv6.ip6_dst_ops->entries), 2504 atomic_read(&net->ipv6.ip6_dst_ops.entries),
2505 net->ipv6.rt6_stats->fib_discarded_routes); 2505 net->ipv6.rt6_stats->fib_discarded_routes);
2506 2506
2507 return 0; 2507 return 0;
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2637 2637
2638 if (table) { 2638 if (table) {
2639 table[0].data = &net->ipv6.sysctl.flush_delay; 2639 table[0].data = &net->ipv6.sysctl.flush_delay;
2640 table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh; 2640 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2641 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2641 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2642 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2642 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2643 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; 2643 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
@@ -2655,12 +2655,8 @@ static int ip6_route_net_init(struct net *net)
2655{ 2655{
2656 int ret = -ENOMEM; 2656 int ret = -ENOMEM;
2657 2657
2658 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template, 2658 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2659 sizeof(*net->ipv6.ip6_dst_ops), 2659 sizeof(net->ipv6.ip6_dst_ops));
2660 GFP_KERNEL);
2661 if (!net->ipv6.ip6_dst_ops)
2662 goto out;
2663 net->ipv6.ip6_dst_ops->dst_net = hold_net(net);
2664 2660
2665 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, 2661 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2666 sizeof(*net->ipv6.ip6_null_entry), 2662 sizeof(*net->ipv6.ip6_null_entry),
@@ -2669,7 +2665,7 @@ static int ip6_route_net_init(struct net *net)
2669 goto out_ip6_dst_ops; 2665 goto out_ip6_dst_ops;
2670 net->ipv6.ip6_null_entry->u.dst.path = 2666 net->ipv6.ip6_null_entry->u.dst.path =
2671 (struct dst_entry *)net->ipv6.ip6_null_entry; 2667 (struct dst_entry *)net->ipv6.ip6_null_entry;
2672 net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops; 2668 net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2673 2669
2674#ifdef CONFIG_IPV6_MULTIPLE_TABLES 2670#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2675 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, 2671 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2679,7 +2675,7 @@ static int ip6_route_net_init(struct net *net)
2679 goto out_ip6_null_entry; 2675 goto out_ip6_null_entry;
2680 net->ipv6.ip6_prohibit_entry->u.dst.path = 2676 net->ipv6.ip6_prohibit_entry->u.dst.path =
2681 (struct dst_entry *)net->ipv6.ip6_prohibit_entry; 2677 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2682 net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops; 2678 net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2683 2679
2684 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, 2680 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2685 sizeof(*net->ipv6.ip6_blk_hole_entry), 2681 sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2688,7 +2684,7 @@ static int ip6_route_net_init(struct net *net)
2688 goto out_ip6_prohibit_entry; 2684 goto out_ip6_prohibit_entry;
2689 net->ipv6.ip6_blk_hole_entry->u.dst.path = 2685 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2690 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; 2686 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2691 net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops; 2687 net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2692#endif 2688#endif
2693 2689
2694 net->ipv6.sysctl.flush_delay = 0; 2690 net->ipv6.sysctl.flush_delay = 0;
@@ -2717,8 +2713,6 @@ out_ip6_null_entry:
2717 kfree(net->ipv6.ip6_null_entry); 2713 kfree(net->ipv6.ip6_null_entry);
2718#endif 2714#endif
2719out_ip6_dst_ops: 2715out_ip6_dst_ops:
2720 release_net(net->ipv6.ip6_dst_ops->dst_net);
2721 kfree(net->ipv6.ip6_dst_ops);
2722 goto out; 2716 goto out;
2723} 2717}
2724 2718
@@ -2733,8 +2727,6 @@ static void ip6_route_net_exit(struct net *net)
2733 kfree(net->ipv6.ip6_prohibit_entry); 2727 kfree(net->ipv6.ip6_prohibit_entry);
2734 kfree(net->ipv6.ip6_blk_hole_entry); 2728 kfree(net->ipv6.ip6_blk_hole_entry);
2735#endif 2729#endif
2736 release_net(net->ipv6.ip6_dst_ops->dst_net);
2737 kfree(net->ipv6.ip6_dst_ops);
2738} 2730}
2739 2731
2740static struct pernet_operations ip6_route_net_ops = { 2732static struct pernet_operations ip6_route_net_ops = {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d335a306a4db..0ae4f6448187 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -609,7 +609,8 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
609 * and that skb is filled properly by that function. 609 * and that skb is filled properly by that function.
610 */ 610 */
611 611
612static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 612static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
613 struct net_device *dev)
613{ 614{
614 struct ip_tunnel *tunnel = netdev_priv(dev); 615 struct ip_tunnel *tunnel = netdev_priv(dev);
615 struct net_device_stats *stats = &tunnel->dev->stats; 616 struct net_device_stats *stats = &tunnel->dev->stats;
@@ -778,7 +779,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
778 iph->version = 4; 779 iph->version = 4;
779 iph->ihl = sizeof(struct iphdr)>>2; 780 iph->ihl = sizeof(struct iphdr)>>2;
780 if (mtu > IPV6_MIN_MTU) 781 if (mtu > IPV6_MIN_MTU)
781 iph->frag_off = htons(IP_DF); 782 iph->frag_off = tiph->frag_off;
782 else 783 else
783 iph->frag_off = 0; 784 iph->frag_off = 0;
784 785
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d849dd53b788..3aae0f217d61 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,11 +75,11 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77 77
78static struct inet_connection_sock_af_ops ipv6_mapped; 78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static struct inet_connection_sock_af_ops ipv6_specific; 79static const struct inet_connection_sock_af_ops ipv6_specific;
80#ifdef CONFIG_TCP_MD5SIG 80#ifdef CONFIG_TCP_MD5SIG
81static struct tcp_sock_af_ops tcp_sock_ipv6_specific; 81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83#else 83#else
84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
85 struct in6_addr *addr) 85 struct in6_addr *addr)
@@ -591,7 +591,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
591 } 591 }
592 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 592 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
593 } 593 }
594 if (tcp_alloc_md5sig_pool() == NULL) { 594 if (tcp_alloc_md5sig_pool(sk) == NULL) {
595 kfree(newkey); 595 kfree(newkey);
596 return -ENOMEM; 596 return -ENOMEM;
597 } 597 }
@@ -894,7 +894,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
894}; 894};
895 895
896#ifdef CONFIG_TCP_MD5SIG 896#ifdef CONFIG_TCP_MD5SIG
897static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { 897static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
898 .md5_lookup = tcp_v6_reqsk_md5_lookup, 898 .md5_lookup = tcp_v6_reqsk_md5_lookup,
899 .calc_md5_hash = tcp_v6_md5_hash_skb, 899 .calc_md5_hash = tcp_v6_md5_hash_skb,
900}; 900};
@@ -1003,6 +1003,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1003 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1003 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1004 1004
1005 t1 = (struct tcphdr *) skb_push(buff, tot_len); 1005 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1006 skb_reset_transport_header(skb);
1006 1007
1007 /* Swap the send and the receive. */ 1008 /* Swap the send and the receive. */
1008 memset(t1, 0, sizeof(*t1)); 1009 memset(t1, 0, sizeof(*t1));
@@ -1760,7 +1761,7 @@ static int tcp_v6_remember_stamp(struct sock *sk)
1760 return 0; 1761 return 0;
1761} 1762}
1762 1763
1763static struct inet_connection_sock_af_ops ipv6_specific = { 1764static const struct inet_connection_sock_af_ops ipv6_specific = {
1764 .queue_xmit = inet6_csk_xmit, 1765 .queue_xmit = inet6_csk_xmit,
1765 .send_check = tcp_v6_send_check, 1766 .send_check = tcp_v6_send_check,
1766 .rebuild_header = inet6_sk_rebuild_header, 1767 .rebuild_header = inet6_sk_rebuild_header,
@@ -1780,7 +1781,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
1780}; 1781};
1781 1782
1782#ifdef CONFIG_TCP_MD5SIG 1783#ifdef CONFIG_TCP_MD5SIG
1783static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { 1784static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1784 .md5_lookup = tcp_v6_md5_lookup, 1785 .md5_lookup = tcp_v6_md5_lookup,
1785 .calc_md5_hash = tcp_v6_md5_hash_skb, 1786 .calc_md5_hash = tcp_v6_md5_hash_skb,
1786 .md5_add = tcp_v6_md5_add_func, 1787 .md5_add = tcp_v6_md5_add_func,
@@ -1792,7 +1793,7 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1792 * TCP over IPv4 via INET6 API 1793 * TCP over IPv4 via INET6 API
1793 */ 1794 */
1794 1795
1795static struct inet_connection_sock_af_ops ipv6_mapped = { 1796static const struct inet_connection_sock_af_ops ipv6_mapped = {
1796 .queue_xmit = ip_queue_xmit, 1797 .queue_xmit = ip_queue_xmit,
1797 .send_check = tcp_v4_send_check, 1798 .send_check = tcp_v4_send_check,
1798 .rebuild_header = inet_sk_rebuild_header, 1799 .rebuild_header = inet_sk_rebuild_header,
@@ -1812,7 +1813,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
1812}; 1813};
1813 1814
1814#ifdef CONFIG_TCP_MD5SIG 1815#ifdef CONFIG_TCP_MD5SIG
1815static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { 1816static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1816 .md5_lookup = tcp_v4_md5_lookup, 1817 .md5_lookup = tcp_v4_md5_lookup,
1817 .calc_md5_hash = tcp_v4_md5_hash_skb, 1818 .calc_md5_hash = tcp_v4_md5_hash_skb,
1818 .md5_add = tcp_v6_md5_add_func, 1819 .md5_add = tcp_v6_md5_add_func,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d79fa6724451..164040613c2e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -724,12 +724,18 @@ static int udp_v6_push_pending_frames(struct sock *sk)
724 724
725send: 725send:
726 err = ip6_push_pending_frames(sk); 726 err = ip6_push_pending_frames(sk);
727 if (err) {
728 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
729 UDP6_INC_STATS_USER(sock_net(sk),
730 UDP_MIB_SNDBUFERRORS, is_udplite);
731 err = 0;
732 }
733 } else
734 UDP6_INC_STATS_USER(sock_net(sk),
735 UDP_MIB_OUTDATAGRAMS, is_udplite);
727out: 736out:
728 up->len = 0; 737 up->len = 0;
729 up->pending = 0; 738 up->pending = 0;
730 if (!err)
731 UDP6_INC_STATS_USER(sock_net(sk),
732 UDP_MIB_OUTDATAGRAMS, is_udplite);
733 return err; 739 return err;
734} 740}
735 741
@@ -946,11 +952,8 @@ do_udp_sendmsg:
946 hlimit = ip6_dst_hoplimit(dst); 952 hlimit = ip6_dst_hoplimit(dst);
947 } 953 }
948 954
949 if (tclass < 0) { 955 if (tclass < 0)
950 tclass = np->tclass; 956 tclass = np->tclass;
951 if (tclass < 0)
952 tclass = 0;
953 }
954 957
955 if (msg->msg_flags&MSG_CONFIRM) 958 if (msg->msg_flags&MSG_CONFIRM)
956 goto do_confirm; 959 goto do_confirm;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 417b0e309495..f1118d92a191 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -41,6 +41,7 @@
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/uio.h> 42#include <linux/uio.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/smp_lock.h>
44#include <linux/socket.h> 45#include <linux/socket.h>
45#include <linux/sockios.h> 46#include <linux/sockios.h>
46#include <linux/string.h> 47#include <linux/string.h>
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index cb762c8723ea..50b43c57d5d8 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -45,6 +45,7 @@
45#include <linux/capability.h> 45#include <linux/capability.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/types.h> 47#include <linux/types.h>
48#include <linux/smp_lock.h>
48#include <linux/socket.h> 49#include <linux/socket.h>
49#include <linux/sockios.h> 50#include <linux/sockios.h>
50#include <linux/init.h> 51#include <linux/init.h>
@@ -714,6 +715,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
714 struct sock *sk = sock->sk; 715 struct sock *sk = sock->sk;
715 struct irda_sock *self = irda_sk(sk); 716 struct irda_sock *self = irda_sk(sk);
716 717
718 memset(&saddr, 0, sizeof(saddr));
717 if (peer) { 719 if (peer) {
718 if (sk->sk_state != TCP_ESTABLISHED) 720 if (sk->sk_state != TCP_ESTABLISHED)
719 return -ENOTCONN; 721 return -ENOTCONN;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 64230cffcfee..7b6b631f647f 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -41,7 +41,8 @@
41 41
42static int irlan_eth_open(struct net_device *dev); 42static int irlan_eth_open(struct net_device *dev);
43static int irlan_eth_close(struct net_device *dev); 43static int irlan_eth_close(struct net_device *dev);
44static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev); 44static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
45 struct net_device *dev);
45static void irlan_eth_set_multicast_list( struct net_device *dev); 46static void irlan_eth_set_multicast_list( struct net_device *dev);
46static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev); 47static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
47 48
@@ -162,7 +163,8 @@ static int irlan_eth_close(struct net_device *dev)
162 * Transmits ethernet frames over IrDA link. 163 * Transmits ethernet frames over IrDA link.
163 * 164 *
164 */ 165 */
165static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev) 166static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
167 struct net_device *dev)
166{ 168{
167 struct irlan_cb *self = netdev_priv(dev); 169 struct irlan_cb *self = netdev_priv(dev);
168 int ret; 170 int ret;
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index bccf4d0059f0..b001c361ad30 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -241,7 +241,6 @@
241#include <linux/module.h> 241#include <linux/module.h>
242 242
243#include <linux/kernel.h> 243#include <linux/kernel.h>
244#include <linux/smp_lock.h>
245#include <linux/skbuff.h> 244#include <linux/skbuff.h>
246#include <linux/tty.h> 245#include <linux/tty.h>
247#include <linux/proc_fs.h> 246#include <linux/proc_fs.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 6d8ae03c14f5..68cbcb19cbd8 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -13,6 +13,7 @@
13 * 2) as a control channel (write commands, read events) 13 * 2) as a control channel (write commands, read events)
14 */ 14 */
15 15
16#include <linux/smp_lock.h>
16#include "irnet_ppp.h" /* Private header */ 17#include "irnet_ppp.h" /* Private header */
17/* Please put other headers in irnet.h - Thanks */ 18/* Please put other headers in irnet.h - Thanks */
18 19
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d9f8bd4ebd05..b5df2418f90c 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -95,7 +95,7 @@ static int
95/**************************** VARIABLES ****************************/ 95/**************************** VARIABLES ****************************/
96 96
97/* Filesystem callbacks (to call us) */ 97/* Filesystem callbacks (to call us) */
98static struct file_operations irnet_device_fops = 98static const struct file_operations irnet_device_fops =
99{ 99{
100 .owner = THIS_MODULE, 100 .owner = THIS_MODULE,
101 .read = dev_irnet_read, 101 .read = dev_irnet_read,
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 8ff1861649e8..318766e5dbdf 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -34,21 +34,21 @@
34#include <net/irda/irlap.h> 34#include <net/irda/irlap.h>
35#include <net/irda/irlmp.h> 35#include <net/irda/irlmp.h>
36 36
37extern struct file_operations discovery_seq_fops; 37extern const struct file_operations discovery_seq_fops;
38extern struct file_operations irlap_seq_fops; 38extern const struct file_operations irlap_seq_fops;
39extern struct file_operations irlmp_seq_fops; 39extern const struct file_operations irlmp_seq_fops;
40extern struct file_operations irttp_seq_fops; 40extern const struct file_operations irttp_seq_fops;
41extern struct file_operations irias_seq_fops; 41extern const struct file_operations irias_seq_fops;
42 42
43struct irda_entry { 43struct irda_entry {
44 const char *name; 44 const char *name;
45 struct file_operations *fops; 45 const struct file_operations *fops;
46}; 46};
47 47
48struct proc_dir_entry *proc_irda; 48struct proc_dir_entry *proc_irda;
49EXPORT_SYMBOL(proc_irda); 49EXPORT_SYMBOL(proc_irda);
50 50
51static struct irda_entry irda_dirs[] = { 51static const struct irda_entry irda_dirs[] = {
52 {"discovery", &discovery_seq_fops}, 52 {"discovery", &discovery_seq_fops},
53 {"irttp", &irttp_seq_fops}, 53 {"irttp", &irttp_seq_fops},
54 {"irlmp", &irlmp_seq_fops}, 54 {"irlmp", &irlmp_seq_fops},
diff --git a/net/key/af_key.c b/net/key/af_key.c
index dba9abd27f90..4e98193dfa0f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3705,7 +3705,7 @@ static void pfkey_seq_stop(struct seq_file *f, void *v)
3705 read_unlock(&pfkey_table_lock); 3705 read_unlock(&pfkey_table_lock);
3706} 3706}
3707 3707
3708static struct seq_operations pfkey_seq_ops = { 3708static const struct seq_operations pfkey_seq_ops = {
3709 .start = pfkey_seq_start, 3709 .start = pfkey_seq_start,
3710 .next = pfkey_seq_next, 3710 .next = pfkey_seq_next,
3711 .stop = pfkey_seq_stop, 3711 .stop = pfkey_seq_stop,
@@ -3718,7 +3718,7 @@ static int pfkey_seq_open(struct inode *inode, struct file *file)
3718 sizeof(struct seq_net_private)); 3718 sizeof(struct seq_net_private));
3719} 3719}
3720 3720
3721static struct file_operations pfkey_proc_ops = { 3721static const struct file_operations pfkey_proc_ops = {
3722 .open = pfkey_seq_open, 3722 .open = pfkey_seq_open,
3723 .read = seq_read, 3723 .read = seq_read,
3724 .llseek = seq_lseek, 3724 .llseek = seq_lseek,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 9208cf5f2bd5..c45eee1c0e8d 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
914 struct llc_sock *llc = llc_sk(sk); 914 struct llc_sock *llc = llc_sk(sk);
915 int rc = 0; 915 int rc = 0;
916 916
917 memset(&sllc, 0, sizeof(sllc));
917 lock_sock(sk); 918 lock_sock(sk);
918 if (sock_flag(sk, SOCK_ZAPPED)) 919 if (sock_flag(sk, SOCK_ZAPPED))
919 goto out; 920 goto out;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 7dd77b6d4c9a..4d5543af3123 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -13,8 +13,7 @@ config MAC80211
13comment "CFG80211 needs to be enabled for MAC80211" 13comment "CFG80211 needs to be enabled for MAC80211"
14 depends on CFG80211=n 14 depends on CFG80211=n
15 15
16menu "Rate control algorithm selection" 16if MAC80211 != n
17 depends on MAC80211 != n
18 17
19config MAC80211_RC_PID 18config MAC80211_RC_PID
20 bool "PID controller based rate control algorithm" if EMBEDDED 19 bool "PID controller based rate control algorithm" if EMBEDDED
@@ -61,17 +60,17 @@ config MAC80211_RC_DEFAULT
61 default "pid" if MAC80211_RC_DEFAULT_PID 60 default "pid" if MAC80211_RC_DEFAULT_PID
62 default "" 61 default ""
63 62
64endmenu 63endif
65 64
66config MAC80211_MESH 65config MAC80211_MESH
67 bool "Enable mac80211 mesh networking (pre-802.11s) support" 66 bool "Enable mac80211 mesh networking (pre-802.11s) support"
68 depends on MAC80211 && EXPERIMENTAL 67 depends on MAC80211 && EXPERIMENTAL
69 depends on BROKEN
70 ---help--- 68 ---help---
71 This options enables support of Draft 802.11s mesh networking. 69 This options enables support of Draft 802.11s mesh networking.
72 The implementation is based on Draft 1.08 of the Mesh Networking 70 The implementation is based on Draft 2.08 of the Mesh Networking
73 amendment. For more information visit http://o11s.org/. 71 amendment. However, no compliance with that draft is claimed or even
74 72 possible, as drafts leave a number of identifiers to be defined after
73 ratification. For more information visit http://o11s.org/.
75 74
76config MAC80211_LEDS 75config MAC80211_LEDS
77 bool "Enable LED triggers" 76 bool "Enable LED triggers"
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1958c7c42cd9..bd765f30dba2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -381,6 +381,14 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
381 &local->hw, queue, 381 &local->hw, queue,
382 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 382 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
383 383
384 if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
385 return;
386
387 if (WARN(!sta->ampdu_mlme.tid_tx[tid],
388 "TID %d gone but expected when splicing aggregates from"
389 "the pending queue\n", tid))
390 return;
391
384 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { 392 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
385 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 393 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
386 /* copy over remaining packets */ 394 /* copy over remaining packets */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4bbf5007799b..5608f6c68413 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -323,6 +323,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
323{ 323{
324 struct ieee80211_sub_if_data *sdata = sta->sdata; 324 struct ieee80211_sub_if_data *sdata = sta->sdata;
325 325
326 sinfo->generation = sdata->local->sta_generation;
327
326 sinfo->filled = STATION_INFO_INACTIVE_TIME | 328 sinfo->filled = STATION_INFO_INACTIVE_TIME |
327 STATION_INFO_RX_BYTES | 329 STATION_INFO_RX_BYTES |
328 STATION_INFO_TX_BYTES | 330 STATION_INFO_TX_BYTES |
@@ -909,6 +911,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
909 else 911 else
910 memset(next_hop, 0, ETH_ALEN); 912 memset(next_hop, 0, ETH_ALEN);
911 913
914 pinfo->generation = mesh_paths_generation;
915
912 pinfo->filled = MPATH_INFO_FRAME_QLEN | 916 pinfo->filled = MPATH_INFO_FRAME_QLEN |
913 MPATH_INFO_DSN | 917 MPATH_INFO_DSN |
914 MPATH_INFO_METRIC | 918 MPATH_INFO_METRIC |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e9ec6cae2d39..61234e79022b 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -116,6 +116,8 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
116 116
117#ifdef CONFIG_MAC80211_MESH 117#ifdef CONFIG_MAC80211_MESH
118/* Mesh stats attributes */ 118/* Mesh stats attributes */
119IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
120IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
119IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); 121IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
120IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); 122IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
121IEEE80211_IF_FILE(dropped_frames_no_route, 123IEEE80211_IF_FILE(dropped_frames_no_route,
@@ -205,6 +207,8 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
205{ 207{
206 sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", 208 sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats",
207 sdata->debugfsdir); 209 sdata->debugfsdir);
210 MESHSTATS_ADD(fwded_mcast);
211 MESHSTATS_ADD(fwded_unicast);
208 MESHSTATS_ADD(fwded_frames); 212 MESHSTATS_ADD(fwded_frames);
209 MESHSTATS_ADD(dropped_frames_ttl); 213 MESHSTATS_ADD(dropped_frames_ttl);
210 MESHSTATS_ADD(dropped_frames_no_route); 214 MESHSTATS_ADD(dropped_frames_no_route);
@@ -327,6 +331,8 @@ static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
327 331
328static void del_mesh_stats(struct ieee80211_sub_if_data *sdata) 332static void del_mesh_stats(struct ieee80211_sub_if_data *sdata)
329{ 333{
334 MESHSTATS_DEL(fwded_mcast);
335 MESHSTATS_DEL(fwded_unicast);
330 MESHSTATS_DEL(fwded_frames); 336 MESHSTATS_DEL(fwded_frames);
331 MESHSTATS_DEL(dropped_frames_ttl); 337 MESHSTATS_DEL(dropped_frames_ttl);
332 MESHSTATS_DEL(dropped_frames_no_route); 338 MESHSTATS_DEL(dropped_frames_no_route);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4100c361a99d..020a94a31106 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -12,7 +12,11 @@ static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
12 12
13static inline int drv_start(struct ieee80211_local *local) 13static inline int drv_start(struct ieee80211_local *local)
14{ 14{
15 int ret = local->ops->start(&local->hw); 15 int ret;
16
17 local->started = true;
18 smp_mb();
19 ret = local->ops->start(&local->hw);
16 trace_drv_start(local, ret); 20 trace_drv_start(local, ret);
17 return ret; 21 return ret;
18} 22}
@@ -21,6 +25,14 @@ static inline void drv_stop(struct ieee80211_local *local)
21{ 25{
22 local->ops->stop(&local->hw); 26 local->ops->stop(&local->hw);
23 trace_drv_stop(local); 27 trace_drv_stop(local);
28
29 /* sync away all work on the tasklet before clearing started */
30 tasklet_disable(&local->tasklet);
31 tasklet_enable(&local->tasklet);
32
33 barrier();
34
35 local->started = false;
24} 36}
25 37
26static inline int drv_add_interface(struct ieee80211_local *local, 38static inline int drv_add_interface(struct ieee80211_local *local,
@@ -55,16 +67,32 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
55 trace_drv_bss_info_changed(local, vif, info, changed); 67 trace_drv_bss_info_changed(local, vif, info, changed);
56} 68}
57 69
70static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
71 int mc_count,
72 struct dev_addr_list *mc_list)
73{
74 u64 ret = 0;
75
76 if (local->ops->prepare_multicast)
77 ret = local->ops->prepare_multicast(&local->hw, mc_count,
78 mc_list);
79
80 trace_drv_prepare_multicast(local, mc_count, ret);
81
82 return ret;
83}
84
58static inline void drv_configure_filter(struct ieee80211_local *local, 85static inline void drv_configure_filter(struct ieee80211_local *local,
59 unsigned int changed_flags, 86 unsigned int changed_flags,
60 unsigned int *total_flags, 87 unsigned int *total_flags,
61 int mc_count, 88 u64 multicast)
62 struct dev_addr_list *mc_list)
63{ 89{
90 might_sleep();
91
64 local->ops->configure_filter(&local->hw, changed_flags, total_flags, 92 local->ops->configure_filter(&local->hw, changed_flags, total_flags,
65 mc_count, mc_list); 93 multicast);
66 trace_drv_configure_filter(local, changed_flags, total_flags, 94 trace_drv_configure_filter(local, changed_flags, total_flags,
67 mc_count); 95 multicast);
68} 96}
69 97
70static inline int drv_set_tim(struct ieee80211_local *local, 98static inline int drv_set_tim(struct ieee80211_local *local,
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 5a10da2d70fd..37b9051afcf3 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -191,31 +191,55 @@ TRACE_EVENT(drv_bss_info_changed,
191 ) 191 )
192); 192);
193 193
194TRACE_EVENT(drv_prepare_multicast,
195 TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
196
197 TP_ARGS(local, mc_count, ret),
198
199 TP_STRUCT__entry(
200 LOCAL_ENTRY
201 __field(int, mc_count)
202 __field(u64, ret)
203 ),
204
205 TP_fast_assign(
206 LOCAL_ASSIGN;
207 __entry->mc_count = mc_count;
208 __entry->ret = ret;
209 ),
210
211 TP_printk(
212 LOCAL_PR_FMT " prepare mc (%d): %llx",
213 LOCAL_PR_ARG, __entry->mc_count,
214 (unsigned long long) __entry->ret
215 )
216);
217
194TRACE_EVENT(drv_configure_filter, 218TRACE_EVENT(drv_configure_filter,
195 TP_PROTO(struct ieee80211_local *local, 219 TP_PROTO(struct ieee80211_local *local,
196 unsigned int changed_flags, 220 unsigned int changed_flags,
197 unsigned int *total_flags, 221 unsigned int *total_flags,
198 int mc_count), 222 u64 multicast),
199 223
200 TP_ARGS(local, changed_flags, total_flags, mc_count), 224 TP_ARGS(local, changed_flags, total_flags, multicast),
201 225
202 TP_STRUCT__entry( 226 TP_STRUCT__entry(
203 LOCAL_ENTRY 227 LOCAL_ENTRY
204 __field(unsigned int, changed) 228 __field(unsigned int, changed)
205 __field(unsigned int, total) 229 __field(unsigned int, total)
206 __field(int, mc) 230 __field(u64, multicast)
207 ), 231 ),
208 232
209 TP_fast_assign( 233 TP_fast_assign(
210 LOCAL_ASSIGN; 234 LOCAL_ASSIGN;
211 __entry->changed = changed_flags; 235 __entry->changed = changed_flags;
212 __entry->total = *total_flags; 236 __entry->total = *total_flags;
213 __entry->mc = mc_count; 237 __entry->multicast = multicast;
214 ), 238 ),
215 239
216 TP_printk( 240 TP_printk(
217 LOCAL_PR_FMT " changed:%#x total:%#x mc:%d", 241 LOCAL_PR_FMT " changed:%#x total:%#x",
218 LOCAL_PR_ARG, __entry->changed, __entry->total, __entry->mc 242 LOCAL_PR_ARG, __entry->changed, __entry->total
219 ) 243 )
220); 244);
221 245
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 630a438180fd..588005c84a6d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -212,7 +212,9 @@ struct ieee80211_if_vlan {
212}; 212};
213 213
214struct mesh_stats { 214struct mesh_stats {
215 __u32 fwded_frames; /* Mesh forwarded frames */ 215 __u32 fwded_mcast; /* Mesh forwarded multicast frames */
216 __u32 fwded_unicast; /* Mesh forwarded unicast frames */
217 __u32 fwded_frames; /* Mesh total forwarded frames */
216 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ 218 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
217 __u32 dropped_frames_no_route; /* Not transmitted, no route found */ 219 __u32 dropped_frames_no_route; /* Not transmitted, no route found */
218 atomic_t estab_plinks; 220 atomic_t estab_plinks;
@@ -284,6 +286,7 @@ struct ieee80211_if_managed {
284 286
285 struct mutex mtx; 287 struct mutex mtx;
286 struct ieee80211_bss *associated; 288 struct ieee80211_bss *associated;
289 struct ieee80211_mgd_work *old_associate_work;
287 struct list_head work_list; 290 struct list_head work_list;
288 291
289 u8 bssid[ETH_ALEN]; 292 u8 bssid[ETH_ALEN];
@@ -354,7 +357,7 @@ struct ieee80211_if_mesh {
354 357
355 unsigned long timers_running; 358 unsigned long timers_running;
356 359
357 bool housekeeping; 360 unsigned long wrkq_flags;
358 361
359 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 362 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
360 size_t mesh_id_len; 363 size_t mesh_id_len;
@@ -364,6 +367,10 @@ struct ieee80211_if_mesh {
364 u8 mesh_pm_id[4]; 367 u8 mesh_pm_id[4];
365 /* Congestion Control Mode Identifier */ 368 /* Congestion Control Mode Identifier */
366 u8 mesh_cc_id[4]; 369 u8 mesh_cc_id[4];
370 /* Synchronization Protocol Identifier */
371 u8 mesh_sp_id[4];
372 /* Authentication Protocol Identifier */
373 u8 mesh_auth_id[4];
367 /* Local mesh Destination Sequence Number */ 374 /* Local mesh Destination Sequence Number */
368 u32 dsn; 375 u32 dsn;
369 /* Last used PREQ ID */ 376 /* Last used PREQ ID */
@@ -505,6 +512,8 @@ struct ieee80211_sub_if_data {
505#ifdef CONFIG_MAC80211_MESH 512#ifdef CONFIG_MAC80211_MESH
506 struct dentry *mesh_stats_dir; 513 struct dentry *mesh_stats_dir;
507 struct { 514 struct {
515 struct dentry *fwded_mcast;
516 struct dentry *fwded_unicast;
508 struct dentry *fwded_frames; 517 struct dentry *fwded_frames;
509 struct dentry *dropped_frames_ttl; 518 struct dentry *dropped_frames_ttl;
510 struct dentry *dropped_frames_no_route; 519 struct dentry *dropped_frames_no_route;
@@ -635,6 +644,9 @@ struct ieee80211_local {
635 /* protects the aggregated multicast list and filter calls */ 644 /* protects the aggregated multicast list and filter calls */
636 spinlock_t filter_lock; 645 spinlock_t filter_lock;
637 646
647 /* used for uploading changed mc list */
648 struct work_struct reconfig_filter;
649
638 /* aggregated multicast list */ 650 /* aggregated multicast list */
639 struct dev_addr_list *mc_list; 651 struct dev_addr_list *mc_list;
640 int mc_count; 652 int mc_count;
@@ -655,6 +667,9 @@ struct ieee80211_local {
655 */ 667 */
656 bool quiescing; 668 bool quiescing;
657 669
670 /* device is started */
671 bool started;
672
658 int tx_headroom; /* required headroom for hardware/radiotap */ 673 int tx_headroom; /* required headroom for hardware/radiotap */
659 674
660 /* Tasklet and skb queue to process calls from IRQ mode. All frames 675 /* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -677,6 +692,7 @@ struct ieee80211_local {
677 struct list_head sta_list; 692 struct list_head sta_list;
678 struct sta_info *sta_hash[STA_HASH_SIZE]; 693 struct sta_info *sta_hash[STA_HASH_SIZE];
679 struct timer_list sta_cleanup; 694 struct timer_list sta_cleanup;
695 int sta_generation;
680 696
681 struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; 697 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
682 struct tasklet_struct tx_pending_tasklet; 698 struct tasklet_struct tx_pending_tasklet;
@@ -713,7 +729,7 @@ struct ieee80211_local {
713 struct mutex scan_mtx; 729 struct mutex scan_mtx;
714 unsigned long scanning; 730 unsigned long scanning;
715 struct cfg80211_ssid scan_ssid; 731 struct cfg80211_ssid scan_ssid;
716 struct cfg80211_scan_request int_scan_req; 732 struct cfg80211_scan_request *int_scan_req;
717 struct cfg80211_scan_request *scan_req; 733 struct cfg80211_scan_request *scan_req;
718 struct ieee80211_channel *scan_channel; 734 struct ieee80211_channel *scan_channel;
719 const u8 *orig_ies; 735 const u8 *orig_ies;
@@ -1034,8 +1050,10 @@ void ieee80211_recalc_idle(struct ieee80211_local *local);
1034/* tx handling */ 1050/* tx handling */
1035void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1051void ieee80211_clear_tx_pending(struct ieee80211_local *local);
1036void ieee80211_tx_pending(unsigned long data); 1052void ieee80211_tx_pending(unsigned long data);
1037int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); 1053netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1038int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); 1054 struct net_device *dev);
1055netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1056 struct net_device *dev);
1039 1057
1040/* HT */ 1058/* HT */
1041void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 1059void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
@@ -1073,6 +1091,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1073 1091
1074/* Suspend/resume and hw reconfiguration */ 1092/* Suspend/resume and hw reconfiguration */
1075int ieee80211_reconfig(struct ieee80211_local *local); 1093int ieee80211_reconfig(struct ieee80211_local *local);
1094void ieee80211_stop_device(struct ieee80211_local *local);
1076 1095
1077#ifdef CONFIG_PM 1096#ifdef CONFIG_PM
1078int __ieee80211_suspend(struct ieee80211_hw *hw); 1097int __ieee80211_suspend(struct ieee80211_hw *hw);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e8fb03b91a44..f6005adcbf90 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -227,9 +227,7 @@ static int ieee80211_open(struct net_device *dev)
227 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) 227 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
228 local->fif_other_bss++; 228 local->fif_other_bss++;
229 229
230 spin_lock_bh(&local->filter_lock);
231 ieee80211_configure_filter(local); 230 ieee80211_configure_filter(local);
232 spin_unlock_bh(&local->filter_lock);
233 break; 231 break;
234 default: 232 default:
235 conf.vif = &sdata->vif; 233 conf.vif = &sdata->vif;
@@ -241,17 +239,13 @@ static int ieee80211_open(struct net_device *dev)
241 239
242 if (ieee80211_vif_is_mesh(&sdata->vif)) { 240 if (ieee80211_vif_is_mesh(&sdata->vif)) {
243 local->fif_other_bss++; 241 local->fif_other_bss++;
244 spin_lock_bh(&local->filter_lock);
245 ieee80211_configure_filter(local); 242 ieee80211_configure_filter(local);
246 spin_unlock_bh(&local->filter_lock);
247 243
248 ieee80211_start_mesh(sdata); 244 ieee80211_start_mesh(sdata);
249 } else if (sdata->vif.type == NL80211_IFTYPE_AP) { 245 } else if (sdata->vif.type == NL80211_IFTYPE_AP) {
250 local->fif_pspoll++; 246 local->fif_pspoll++;
251 247
252 spin_lock_bh(&local->filter_lock);
253 ieee80211_configure_filter(local); 248 ieee80211_configure_filter(local);
254 spin_unlock_bh(&local->filter_lock);
255 } 249 }
256 250
257 changed |= ieee80211_reset_erp_info(sdata); 251 changed |= ieee80211_reset_erp_info(sdata);
@@ -283,11 +277,6 @@ static int ieee80211_open(struct net_device *dev)
283 } 277 }
284 } 278 }
285 279
286 if (local->open_count == 0) {
287 tasklet_enable(&local->tx_pending_tasklet);
288 tasklet_enable(&local->tasklet);
289 }
290
291 /* 280 /*
292 * set_multicast_list will be invoked by the networking core 281 * set_multicast_list will be invoked by the networking core
293 * which will check whether any increments here were done in 282 * which will check whether any increments here were done in
@@ -404,10 +393,11 @@ static int ieee80211_stop(struct net_device *dev)
404 spin_lock_bh(&local->filter_lock); 393 spin_lock_bh(&local->filter_lock);
405 __dev_addr_unsync(&local->mc_list, &local->mc_count, 394 __dev_addr_unsync(&local->mc_list, &local->mc_count,
406 &dev->mc_list, &dev->mc_count); 395 &dev->mc_list, &dev->mc_count);
407 ieee80211_configure_filter(local);
408 spin_unlock_bh(&local->filter_lock); 396 spin_unlock_bh(&local->filter_lock);
409 netif_addr_unlock_bh(dev); 397 netif_addr_unlock_bh(dev);
410 398
399 ieee80211_configure_filter(local);
400
411 del_timer_sync(&local->dynamic_ps_timer); 401 del_timer_sync(&local->dynamic_ps_timer);
412 cancel_work_sync(&local->dynamic_ps_enable_work); 402 cancel_work_sync(&local->dynamic_ps_enable_work);
413 403
@@ -458,9 +448,7 @@ static int ieee80211_stop(struct net_device *dev)
458 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) 448 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
459 local->fif_other_bss--; 449 local->fif_other_bss--;
460 450
461 spin_lock_bh(&local->filter_lock);
462 ieee80211_configure_filter(local); 451 ieee80211_configure_filter(local);
463 spin_unlock_bh(&local->filter_lock);
464 break; 452 break;
465 case NL80211_IFTYPE_STATION: 453 case NL80211_IFTYPE_STATION:
466 del_timer_sync(&sdata->u.mgd.chswitch_timer); 454 del_timer_sync(&sdata->u.mgd.chswitch_timer);
@@ -503,38 +491,14 @@ static int ieee80211_stop(struct net_device *dev)
503 local->fif_other_bss--; 491 local->fif_other_bss--;
504 atomic_dec(&local->iff_allmultis); 492 atomic_dec(&local->iff_allmultis);
505 493
506 spin_lock_bh(&local->filter_lock);
507 ieee80211_configure_filter(local); 494 ieee80211_configure_filter(local);
508 spin_unlock_bh(&local->filter_lock);
509 495
510 ieee80211_stop_mesh(sdata); 496 ieee80211_stop_mesh(sdata);
511 } 497 }
512 /* fall through */ 498 /* fall through */
513 default: 499 default:
514 if (local->scan_sdata == sdata) { 500 if (local->scan_sdata == sdata)
515 if (!local->ops->hw_scan) 501 ieee80211_scan_cancel(local);
516 cancel_delayed_work_sync(&local->scan_work);
517 /*
518 * The software scan can no longer run now, so we can
519 * clear out the scan_sdata reference. However, the
520 * hardware scan may still be running. The complete
521 * function must be prepared to handle a NULL value.
522 */
523 local->scan_sdata = NULL;
524 /*
525 * The memory barrier guarantees that another CPU
526 * that is hardware-scanning will now see the fact
527 * that this interface is gone.
528 */
529 smp_mb();
530 /*
531 * If software scanning, complete the scan but since
532 * the scan_sdata is NULL already don't send out a
533 * scan event to userspace -- the scan is incomplete.
534 */
535 if (test_bit(SCAN_SW_SCANNING, &local->scanning))
536 ieee80211_scan_completed(&local->hw, true);
537 }
538 502
539 /* 503 /*
540 * Disable beaconing for AP and mesh, IBSS can't 504 * Disable beaconing for AP and mesh, IBSS can't
@@ -561,14 +525,8 @@ static int ieee80211_stop(struct net_device *dev)
561 ieee80211_recalc_ps(local, -1); 525 ieee80211_recalc_ps(local, -1);
562 526
563 if (local->open_count == 0) { 527 if (local->open_count == 0) {
564 drv_stop(local); 528 ieee80211_clear_tx_pending(local);
565 529 ieee80211_stop_device(local);
566 ieee80211_led_radio(local, false);
567
568 flush_workqueue(local->workqueue);
569
570 tasklet_disable(&local->tx_pending_tasklet);
571 tasklet_disable(&local->tasklet);
572 530
573 /* no reconfiguring after stop! */ 531 /* no reconfiguring after stop! */
574 hw_reconf_flags = 0; 532 hw_reconf_flags = 0;
@@ -622,8 +580,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
622 spin_lock_bh(&local->filter_lock); 580 spin_lock_bh(&local->filter_lock);
623 __dev_addr_sync(&local->mc_list, &local->mc_count, 581 __dev_addr_sync(&local->mc_list, &local->mc_count,
624 &dev->mc_list, &dev->mc_count); 582 &dev->mc_list, &dev->mc_count);
625 ieee80211_configure_filter(local);
626 spin_unlock_bh(&local->filter_lock); 583 spin_unlock_bh(&local->filter_lock);
584 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
627} 585}
628 586
629/* 587/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0c4f8e122ed6..797f53942e5f 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -50,9 +50,9 @@ struct ieee80211_tx_status_rtap_hdr {
50} __attribute__ ((packed)); 50} __attribute__ ((packed));
51 51
52 52
53/* must be called under mdev tx lock */
54void ieee80211_configure_filter(struct ieee80211_local *local) 53void ieee80211_configure_filter(struct ieee80211_local *local)
55{ 54{
55 u64 mc;
56 unsigned int changed_flags; 56 unsigned int changed_flags;
57 unsigned int new_flags = 0; 57 unsigned int new_flags = 0;
58 58
@@ -62,7 +62,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
62 if (atomic_read(&local->iff_allmultis)) 62 if (atomic_read(&local->iff_allmultis))
63 new_flags |= FIF_ALLMULTI; 63 new_flags |= FIF_ALLMULTI;
64 64
65 if (local->monitors) 65 if (local->monitors || local->scanning)
66 new_flags |= FIF_BCN_PRBRESP_PROMISC; 66 new_flags |= FIF_BCN_PRBRESP_PROMISC;
67 67
68 if (local->fif_fcsfail) 68 if (local->fif_fcsfail)
@@ -80,20 +80,30 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
80 if (local->fif_pspoll) 80 if (local->fif_pspoll)
81 new_flags |= FIF_PSPOLL; 81 new_flags |= FIF_PSPOLL;
82 82
83 spin_lock_bh(&local->filter_lock);
83 changed_flags = local->filter_flags ^ new_flags; 84 changed_flags = local->filter_flags ^ new_flags;
84 85
86 mc = drv_prepare_multicast(local, local->mc_count, local->mc_list);
87 spin_unlock_bh(&local->filter_lock);
88
85 /* be a bit nasty */ 89 /* be a bit nasty */
86 new_flags |= (1<<31); 90 new_flags |= (1<<31);
87 91
88 drv_configure_filter(local, changed_flags, &new_flags, 92 drv_configure_filter(local, changed_flags, &new_flags, mc);
89 local->mc_count,
90 local->mc_list);
91 93
92 WARN_ON(new_flags & (1<<31)); 94 WARN_ON(new_flags & (1<<31));
93 95
94 local->filter_flags = new_flags & ~(1<<31); 96 local->filter_flags = new_flags & ~(1<<31);
95} 97}
96 98
99static void ieee80211_reconfig_filter(struct work_struct *work)
100{
101 struct ieee80211_local *local =
102 container_of(work, struct ieee80211_local, reconfig_filter);
103
104 ieee80211_configure_filter(local);
105}
106
97int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) 107int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
98{ 108{
99 struct ieee80211_channel *chan, *scan_chan; 109 struct ieee80211_channel *chan, *scan_chan;
@@ -231,9 +241,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
231 241
232 drv_bss_info_changed(local, &sdata->vif, 242 drv_bss_info_changed(local, &sdata->vif,
233 &sdata->vif.bss_conf, changed); 243 &sdata->vif.bss_conf, changed);
234
235 /* DEPRECATED */
236 local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
237} 244}
238 245
239u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 246u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -475,6 +482,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
475 } 482 }
476 483
477 rate_control_tx_status(local, sband, sta, skb); 484 rate_control_tx_status(local, sband, sta, skb);
485 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
486 ieee80211s_update_metric(local, sta, skb);
478 } 487 }
479 488
480 rcu_read_unlock(); 489 rcu_read_unlock();
@@ -677,7 +686,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
677 local->hw.max_rates = 1; 686 local->hw.max_rates = 1;
678 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 687 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
679 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 688 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
680 local->hw.conf.radio_enabled = true;
681 local->user_power_level = -1; 689 local->user_power_level = -1;
682 690
683 INIT_LIST_HEAD(&local->interfaces); 691 INIT_LIST_HEAD(&local->interfaces);
@@ -692,6 +700,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
692 700
693 INIT_WORK(&local->restart_work, ieee80211_restart_work); 701 INIT_WORK(&local->restart_work, ieee80211_restart_work);
694 702
703 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
704
695 INIT_WORK(&local->dynamic_ps_enable_work, 705 INIT_WORK(&local->dynamic_ps_enable_work,
696 ieee80211_dynamic_ps_enable_work); 706 ieee80211_dynamic_ps_enable_work);
697 INIT_WORK(&local->dynamic_ps_disable_work, 707 INIT_WORK(&local->dynamic_ps_disable_work,
@@ -705,12 +715,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
705 skb_queue_head_init(&local->pending[i]); 715 skb_queue_head_init(&local->pending[i]);
706 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 716 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
707 (unsigned long)local); 717 (unsigned long)local);
708 tasklet_disable(&local->tx_pending_tasklet);
709 718
710 tasklet_init(&local->tasklet, 719 tasklet_init(&local->tasklet,
711 ieee80211_tasklet_handler, 720 ieee80211_tasklet_handler,
712 (unsigned long) local); 721 (unsigned long) local);
713 tasklet_disable(&local->tasklet);
714 722
715 skb_queue_head_init(&local->skb_queue); 723 skb_queue_head_init(&local->skb_queue);
716 skb_queue_head_init(&local->skb_queue_unreliable); 724 skb_queue_head_init(&local->skb_queue_unreliable);
@@ -765,9 +773,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
765 supp_ht = supp_ht || sband->ht_cap.ht_supported; 773 supp_ht = supp_ht || sband->ht_cap.ht_supported;
766 } 774 }
767 775
768 local->int_scan_req.n_channels = channels; 776 local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
769 local->int_scan_req.channels = kzalloc(sizeof(void *) * channels, GFP_KERNEL); 777 sizeof(void *) * channels, GFP_KERNEL);
770 if (!local->int_scan_req.channels) 778 if (!local->int_scan_req)
771 return -ENOMEM; 779 return -ENOMEM;
772 780
773 /* if low-level driver supports AP, we also support VLAN */ 781 /* if low-level driver supports AP, we also support VLAN */
@@ -882,13 +890,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
882 890
883 /* alloc internal scan request */ 891 /* alloc internal scan request */
884 i = 0; 892 i = 0;
885 local->int_scan_req.ssids = &local->scan_ssid; 893 local->int_scan_req->ssids = &local->scan_ssid;
886 local->int_scan_req.n_ssids = 1; 894 local->int_scan_req->n_ssids = 1;
887 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 895 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
888 if (!hw->wiphy->bands[band]) 896 if (!hw->wiphy->bands[band])
889 continue; 897 continue;
890 for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) { 898 for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
891 local->int_scan_req.channels[i] = 899 local->int_scan_req->channels[i] =
892 &hw->wiphy->bands[band]->channels[j]; 900 &hw->wiphy->bands[band]->channels[j];
893 i++; 901 i++;
894 } 902 }
@@ -920,7 +928,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
920 fail_workqueue: 928 fail_workqueue:
921 wiphy_unregister(local->hw.wiphy); 929 wiphy_unregister(local->hw.wiphy);
922 fail_wiphy_register: 930 fail_wiphy_register:
923 kfree(local->int_scan_req.channels); 931 kfree(local->int_scan_req);
924 return result; 932 return result;
925} 933}
926EXPORT_SYMBOL(ieee80211_register_hw); 934EXPORT_SYMBOL(ieee80211_register_hw);
@@ -946,6 +954,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
946 954
947 rtnl_unlock(); 955 rtnl_unlock();
948 956
957 cancel_work_sync(&local->reconfig_filter);
958
949 ieee80211_clear_tx_pending(local); 959 ieee80211_clear_tx_pending(local);
950 sta_info_stop(local); 960 sta_info_stop(local);
951 rate_control_deinitialize(local); 961 rate_control_deinitialize(local);
@@ -962,7 +972,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
962 wiphy_unregister(local->hw.wiphy); 972 wiphy_unregister(local->hw.wiphy);
963 ieee80211_wep_free(local); 973 ieee80211_wep_free(local);
964 ieee80211_led_exit(local); 974 ieee80211_led_exit(local);
965 kfree(local->int_scan_req.channels); 975 kfree(local->int_scan_req);
966} 976}
967EXPORT_SYMBOL(ieee80211_unregister_hw); 977EXPORT_SYMBOL(ieee80211_unregister_hw);
968 978
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2f4f518ab45c..f7364e56f1ee 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -18,8 +18,11 @@
18#define PP_OFFSET 1 /* Path Selection Protocol */ 18#define PP_OFFSET 1 /* Path Selection Protocol */
19#define PM_OFFSET 5 /* Path Selection Metric */ 19#define PM_OFFSET 5 /* Path Selection Metric */
20#define CC_OFFSET 9 /* Congestion Control Mode */ 20#define CC_OFFSET 9 /* Congestion Control Mode */
21#define CAPAB_OFFSET 17 21#define SP_OFFSET 13 /* Synchronization Protocol */
22#define ACCEPT_PLINKS 0x80 22#define AUTH_OFFSET 17 /* Authentication Protocol */
23#define CAPAB_OFFSET 22
24#define CAPAB_ACCEPT_PLINKS 0x80
25#define CAPAB_FORWARDING 0x10
23 26
24#define TMR_RUNNING_HK 0 27#define TMR_RUNNING_HK 0
25#define TMR_RUNNING_MP 1 28#define TMR_RUNNING_MP 1
@@ -47,14 +50,14 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
47 struct ieee80211_local *local = sdata->local; 50 struct ieee80211_local *local = sdata->local;
48 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 51 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
49 52
50 ifmsh->housekeeping = true; 53 ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
51 54
52 if (local->quiescing) { 55 if (local->quiescing) {
53 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); 56 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
54 return; 57 return;
55 } 58 }
56 59
57 ieee80211_queue_work(local->hw.workqueue, &ifmsh->work); 60 ieee80211_queue_work(&local->hw, &ifmsh->work);
58} 61}
59 62
60/** 63/**
@@ -84,7 +87,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
84 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 87 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
85 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && 88 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
86 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && 89 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
87 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) 90 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 &&
91 memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 &&
92 memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0)
88 return true; 93 return true;
89 94
90 return false; 95 return false;
@@ -97,7 +102,7 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
97 */ 102 */
98bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) 103bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
99{ 104{
100 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; 105 return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0;
101} 106}
102 107
103/** 108/**
@@ -123,11 +128,18 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
123 128
124void mesh_ids_set_default(struct ieee80211_if_mesh *sta) 129void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
125{ 130{
126 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; 131 u8 oui[3] = {0x00, 0x0F, 0xAC};
127 132
128 memcpy(sta->mesh_pp_id, def_id, 4); 133 memcpy(sta->mesh_pp_id, oui, sizeof(oui));
129 memcpy(sta->mesh_pm_id, def_id, 4); 134 memcpy(sta->mesh_pm_id, oui, sizeof(oui));
130 memcpy(sta->mesh_cc_id, def_id, 4); 135 memcpy(sta->mesh_cc_id, oui, sizeof(oui));
136 memcpy(sta->mesh_sp_id, oui, sizeof(oui));
137 memcpy(sta->mesh_auth_id, oui, sizeof(oui));
138 sta->mesh_pp_id[sizeof(oui)] = 0;
139 sta->mesh_pm_id[sizeof(oui)] = 0;
140 sta->mesh_cc_id[sizeof(oui)] = 0xff;
141 sta->mesh_sp_id[sizeof(oui)] = 0xff;
142 sta->mesh_auth_id[sizeof(oui)] = 0x0;
131} 143}
132 144
133int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) 145int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -245,7 +257,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
245 if (sdata->u.mesh.mesh_id_len) 257 if (sdata->u.mesh.mesh_id_len)
246 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); 258 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
247 259
248 pos = skb_put(skb, 21); 260 pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN);
249 *pos++ = WLAN_EID_MESH_CONFIG; 261 *pos++ = WLAN_EID_MESH_CONFIG;
250 *pos++ = IEEE80211_MESH_CONFIG_LEN; 262 *pos++ = IEEE80211_MESH_CONFIG_LEN;
251 /* Version */ 263 /* Version */
@@ -263,15 +275,22 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
263 memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); 275 memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
264 pos += 4; 276 pos += 4;
265 277
266 /* Channel precedence: 278 /* Synchronization protocol identifier */
267 * Not running simple channel unification protocol 279 memcpy(pos, sdata->u.mesh.mesh_sp_id, 4);
268 */
269 memset(pos, 0x00, 4);
270 pos += 4; 280 pos += 4;
271 281
282 /* Authentication Protocol identifier */
283 memcpy(pos, sdata->u.mesh.mesh_auth_id, 4);
284 pos += 4;
285
286 /* Mesh Formation Info */
287 memset(pos, 0x00, 1);
288 pos += 1;
289
272 /* Mesh capability */ 290 /* Mesh capability */
273 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); 291 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
274 *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00; 292 *pos = CAPAB_FORWARDING;
293 *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00;
275 *pos++ = 0x00; 294 *pos++ = 0x00;
276 295
277 return; 296 return;
@@ -320,30 +339,6 @@ struct mesh_table *mesh_table_alloc(int size_order)
320 return newtbl; 339 return newtbl;
321} 340}
322 341
323static void __mesh_table_free(struct mesh_table *tbl)
324{
325 kfree(tbl->hash_buckets);
326 kfree(tbl->hashwlock);
327 kfree(tbl);
328}
329
330void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
331{
332 struct hlist_head *mesh_hash;
333 struct hlist_node *p, *q;
334 int i;
335
336 mesh_hash = tbl->hash_buckets;
337 for (i = 0; i <= tbl->hash_mask; i++) {
338 spin_lock(&tbl->hashwlock[i]);
339 hlist_for_each_safe(p, q, &mesh_hash[i]) {
340 tbl->free_node(p, free_leafs);
341 atomic_dec(&tbl->entries);
342 }
343 spin_unlock(&tbl->hashwlock[i]);
344 }
345 __mesh_table_free(tbl);
346}
347 342
348static void ieee80211_mesh_path_timer(unsigned long data) 343static void ieee80211_mesh_path_timer(unsigned long data)
349{ 344{
@@ -357,63 +352,79 @@ static void ieee80211_mesh_path_timer(unsigned long data)
357 return; 352 return;
358 } 353 }
359 354
360 ieee80211_queue_work(local->hw.workqueue, &ifmsh->work); 355 ieee80211_queue_work(&local->hw, &ifmsh->work);
361} 356}
362 357
363struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 358/**
364{ 359 * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
365 struct mesh_table *newtbl; 360 * @hdr: 802.11 frame header
366 struct hlist_head *oldhash; 361 * @fc: frame control field
367 struct hlist_node *p, *q; 362 * @meshda: destination address in the mesh
368 int i; 363 * @meshsa: source address address in the mesh. Same as TA, as frame is
369 364 * locally originated.
370 if (atomic_read(&tbl->entries) 365 *
371 < tbl->mean_chain_len * (tbl->hash_mask + 1)) 366 * Return the length of the 802.11 (does not include a mesh control header)
372 goto endgrow; 367 */
373 368int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, char
374 newtbl = mesh_table_alloc(tbl->size_order + 1); 369 *meshda, char *meshsa) {
375 if (!newtbl) 370 if (is_multicast_ether_addr(meshda)) {
376 goto endgrow; 371 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
377 372 /* DA TA SA */
378 newtbl->free_node = tbl->free_node; 373 memcpy(hdr->addr1, meshda, ETH_ALEN);
379 newtbl->mean_chain_len = tbl->mean_chain_len; 374 memcpy(hdr->addr2, meshsa, ETH_ALEN);
380 newtbl->copy_node = tbl->copy_node; 375 memcpy(hdr->addr3, meshsa, ETH_ALEN);
381 atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); 376 return 24;
382 377 } else {
383 oldhash = tbl->hash_buckets; 378 *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
384 for (i = 0; i <= tbl->hash_mask; i++) 379 IEEE80211_FCTL_TODS);
385 hlist_for_each(p, &oldhash[i]) 380 /* RA TA DA SA */
386 if (tbl->copy_node(p, newtbl) < 0) 381 memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
387 goto errcopy; 382 memcpy(hdr->addr2, meshsa, ETH_ALEN);
388 383 memcpy(hdr->addr3, meshda, ETH_ALEN);
389 return newtbl; 384 memcpy(hdr->addr4, meshsa, ETH_ALEN);
390 385 return 30;
391errcopy:
392 for (i = 0; i <= newtbl->hash_mask; i++) {
393 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
394 tbl->free_node(p, 0);
395 } 386 }
396 __mesh_table_free(newtbl);
397endgrow:
398 return NULL;
399} 387}
400 388
401/** 389/**
402 * ieee80211_new_mesh_header - create a new mesh header 390 * ieee80211_new_mesh_header - create a new mesh header
403 * @meshhdr: uninitialized mesh header 391 * @meshhdr: uninitialized mesh header
404 * @sdata: mesh interface to be used 392 * @sdata: mesh interface to be used
393 * @addr4: addr4 of the mesh frame (1st in ae header)
394 * may be NULL
395 * @addr5: addr5 of the mesh frame (1st or 2nd in ae header)
396 * may be NULL unless addr6 is present
397 * @addr6: addr6 of the mesh frame (2nd or 3rd in ae header)
398 * may be NULL unless addr5 is present
405 * 399 *
406 * Return the header length. 400 * Return the header length.
407 */ 401 */
408int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 402int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
409 struct ieee80211_sub_if_data *sdata) 403 struct ieee80211_sub_if_data *sdata, char *addr4,
404 char *addr5, char *addr6)
410{ 405{
411 meshhdr->flags = 0; 406 int aelen = 0;
407 memset(meshhdr, 0, sizeof(meshhdr));
412 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; 408 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
413 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); 409 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
414 sdata->u.mesh.mesh_seqnum++; 410 sdata->u.mesh.mesh_seqnum++;
415 411 if (addr4) {
416 return 6; 412 meshhdr->flags |= MESH_FLAGS_AE_A4;
413 aelen += ETH_ALEN;
414 memcpy(meshhdr->eaddr1, addr4, ETH_ALEN);
415 }
416 if (addr5 && addr6) {
417 meshhdr->flags |= MESH_FLAGS_AE_A5_A6;
418 aelen += 2 * ETH_ALEN;
419 if (!addr4) {
420 memcpy(meshhdr->eaddr1, addr5, ETH_ALEN);
421 memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
422 } else {
423 memcpy(meshhdr->eaddr2, addr5, ETH_ALEN);
424 memcpy(meshhdr->eaddr3, addr6, ETH_ALEN);
425 }
426 }
427 return 6 + aelen;
417} 428}
418 429
419static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, 430static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
@@ -433,7 +444,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
433 if (free_plinks != sdata->u.mesh.accepting_plinks) 444 if (free_plinks != sdata->u.mesh.accepting_plinks)
434 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 445 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
435 446
436 ifmsh->housekeeping = false;
437 mod_timer(&ifmsh->housekeeping_timer, 447 mod_timer(&ifmsh->housekeeping_timer,
438 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 448 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
439} 449}
@@ -470,10 +480,12 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
470 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 480 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
471 struct ieee80211_local *local = sdata->local; 481 struct ieee80211_local *local = sdata->local;
472 482
473 ifmsh->housekeeping = true; 483 ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
474 ieee80211_queue_work(local->hw.workqueue, &ifmsh->work); 484 ieee80211_queue_work(&local->hw, &ifmsh->work);
485 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
475 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 486 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
476 BSS_CHANGED_BEACON_ENABLED); 487 BSS_CHANGED_BEACON_ENABLED |
488 BSS_CHANGED_BEACON_INT);
477} 489}
478 490
479void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 491void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -608,7 +620,13 @@ static void ieee80211_mesh_work(struct work_struct *work)
608 ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) 620 ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
609 mesh_path_start_discovery(sdata); 621 mesh_path_start_discovery(sdata);
610 622
611 if (ifmsh->housekeeping) 623 if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
624 mesh_mpath_table_grow();
625
626 if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
627 mesh_mpp_table_grow();
628
629 if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
612 ieee80211_mesh_housekeeping(sdata, ifmsh); 630 ieee80211_mesh_housekeeping(sdata, ifmsh);
613} 631}
614 632
@@ -619,7 +637,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
619 rcu_read_lock(); 637 rcu_read_lock();
620 list_for_each_entry_rcu(sdata, &local->interfaces, list) 638 list_for_each_entry_rcu(sdata, &local->interfaces, list)
621 if (ieee80211_vif_is_mesh(&sdata->vif)) 639 if (ieee80211_vif_is_mesh(&sdata->vif))
622 ieee80211_queue_work(local->hw.workqueue, &sdata->u.mesh.work); 640 ieee80211_queue_work(&local->hw, &sdata->u.mesh.work);
623 rcu_read_unlock(); 641 rcu_read_unlock();
624} 642}
625 643
@@ -692,7 +710,7 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
692 case IEEE80211_STYPE_PROBE_RESP: 710 case IEEE80211_STYPE_PROBE_RESP:
693 case IEEE80211_STYPE_BEACON: 711 case IEEE80211_STYPE_BEACON:
694 skb_queue_tail(&ifmsh->skb_queue, skb); 712 skb_queue_tail(&ifmsh->skb_queue, skb);
695 ieee80211_queue_work(local->hw.workqueue, &ifmsh->work); 713 ieee80211_queue_work(&local->hw, &ifmsh->work);
696 return RX_QUEUED; 714 return RX_QUEUED;
697 } 715 }
698 716
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2a2ed182cb7e..dd1c19319f0a 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -44,6 +44,23 @@ enum mesh_path_flags {
44}; 44};
45 45
46/** 46/**
47 * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks
48 *
49 *
50 *
51 * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
52 * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
53 * to grow.
54 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
55 * grow
56 */
57enum mesh_deferred_task_flags {
58 MESH_WORK_HOUSEKEEPING,
59 MESH_WORK_GROW_MPATH_TABLE,
60 MESH_WORK_GROW_MPP_TABLE,
61};
62
63/**
47 * struct mesh_path - mac80211 mesh path structure 64 * struct mesh_path - mac80211 mesh path structure
48 * 65 *
49 * @dst: mesh path destination mac address 66 * @dst: mesh path destination mac address
@@ -61,7 +78,7 @@ enum mesh_path_flags {
61 * retry 78 * retry
62 * @discovery_retries: number of discovery retries 79 * @discovery_retries: number of discovery retries
63 * @flags: mesh path flags, as specified on &enum mesh_path_flags 80 * @flags: mesh path flags, as specified on &enum mesh_path_flags
64 * @state_lock: mesh pat state lock 81 * @state_lock: mesh path state lock
65 * 82 *
66 * 83 *
67 * The combination of dst and sdata is unique in the mesh path table. Since the 84 * The combination of dst and sdata is unique in the mesh path table. Since the
@@ -174,6 +191,7 @@ struct mesh_rmc {
174 */ 191 */
175#define MESH_PATH_REFRESH_TIME 1000 192#define MESH_PATH_REFRESH_TIME 1000
176#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) 193#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
194#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
177 195
178#define MESH_MAX_PREQ_RETRIES 4 196#define MESH_MAX_PREQ_RETRIES 4
179#define MESH_PATH_EXPIRE (600 * HZ) 197#define MESH_PATH_EXPIRE (600 * HZ)
@@ -193,8 +211,11 @@ struct mesh_rmc {
193 211
194/* Public interfaces */ 212/* Public interfaces */
195/* Various */ 213/* Various */
214int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
215 char *da, char *sa);
196int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 216int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
197 struct ieee80211_sub_if_data *sdata); 217 struct ieee80211_sub_if_data *sdata, char *addr4,
218 char *addr5, char *addr6);
198int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 219int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
199 struct ieee80211_sub_if_data *sdata); 220 struct ieee80211_sub_if_data *sdata);
200bool mesh_matches_local(struct ieee802_11_elems *ie, 221bool mesh_matches_local(struct ieee802_11_elems *ie,
@@ -205,6 +226,8 @@ void mesh_mgmt_ies_add(struct sk_buff *skb,
205void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 226void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
206int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 227int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
207void ieee80211s_init(void); 228void ieee80211s_init(void);
229void ieee80211s_update_metric(struct ieee80211_local *local,
230 struct sta_info *stainfo, struct sk_buff *skb);
208void ieee80211s_stop(void); 231void ieee80211s_stop(void);
209void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 232void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
210ieee80211_rx_result 233ieee80211_rx_result
@@ -246,7 +269,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
246/* Mesh tables */ 269/* Mesh tables */
247struct mesh_table *mesh_table_alloc(int size_order); 270struct mesh_table *mesh_table_alloc(int size_order);
248void mesh_table_free(struct mesh_table *tbl, bool free_leafs); 271void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
249struct mesh_table *mesh_table_grow(struct mesh_table *tbl); 272void mesh_mpath_table_grow(void);
273void mesh_mpp_table_grow(void);
250u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, 274u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
251 struct mesh_table *tbl); 275 struct mesh_table *tbl);
252/* Mesh paths */ 276/* Mesh paths */
@@ -265,6 +289,8 @@ void mesh_path_discard_frame(struct sk_buff *skb,
265void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 289void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
266void mesh_path_restart(struct ieee80211_sub_if_data *sdata); 290void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
267 291
292extern int mesh_paths_generation;
293
268#ifdef CONFIG_MAC80211_MESH 294#ifdef CONFIG_MAC80211_MESH
269extern int mesh_allocated; 295extern int mesh_allocated;
270 296
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 11ab71a68ff9..e12a786e26b8 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -201,6 +201,24 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
201 return 0; 201 return 0;
202} 202}
203 203
204void ieee80211s_update_metric(struct ieee80211_local *local,
205 struct sta_info *stainfo, struct sk_buff *skb)
206{
207 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
209 int failed;
210
211 if (!ieee80211_is_data(hdr->frame_control))
212 return;
213
214 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
215
216 /* moving average, scaled to 100 */
217 stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
218 if (stainfo->fail_avg > 95)
219 mesh_plink_broken(stainfo);
220}
221
204static u32 airtime_link_metric_get(struct ieee80211_local *local, 222static u32 airtime_link_metric_get(struct ieee80211_local *local,
205 struct sta_info *sta) 223 struct sta_info *sta)
206{ 224{
@@ -397,7 +415,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
397 415
398static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, 416static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
399 struct ieee80211_mgmt *mgmt, 417 struct ieee80211_mgmt *mgmt,
400 u8 *preq_elem, u32 metric) { 418 u8 *preq_elem, u32 metric)
419{
401 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 420 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
402 struct mesh_path *mpath; 421 struct mesh_path *mpath;
403 u8 *dst_addr, *orig_addr; 422 u8 *dst_addr, *orig_addr;
@@ -430,7 +449,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
430 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || 449 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
431 DSN_LT(mpath->dsn, dst_dsn)) { 450 DSN_LT(mpath->dsn, dst_dsn)) {
432 mpath->dsn = dst_dsn; 451 mpath->dsn = dst_dsn;
433 mpath->flags &= MESH_PATH_DSN_VALID; 452 mpath->flags |= MESH_PATH_DSN_VALID;
434 } else if ((!(dst_flags & MP_F_DO)) && 453 } else if ((!(dst_flags & MP_F_DO)) &&
435 (mpath->flags & MESH_PATH_ACTIVE)) { 454 (mpath->flags & MESH_PATH_ACTIVE)) {
436 reply = true; 455 reply = true;
@@ -478,6 +497,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
478 hopcount, ttl, cpu_to_le32(lifetime), 497 hopcount, ttl, cpu_to_le32(lifetime),
479 cpu_to_le32(metric), cpu_to_le32(preq_id), 498 cpu_to_le32(metric), cpu_to_le32(preq_id),
480 sdata); 499 sdata);
500 ifmsh->mshstats.fwded_mcast++;
481 ifmsh->mshstats.fwded_frames++; 501 ifmsh->mshstats.fwded_frames++;
482 } 502 }
483} 503}
@@ -536,6 +556,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
536 cpu_to_le32(lifetime), cpu_to_le32(metric), 556 cpu_to_le32(lifetime), cpu_to_le32(metric),
537 0, sdata); 557 0, sdata);
538 rcu_read_unlock(); 558 rcu_read_unlock();
559
560 sdata->u.mesh.mshstats.fwded_unicast++;
539 sdata->u.mesh.mshstats.fwded_frames++; 561 sdata->u.mesh.mshstats.fwded_frames++;
540 return; 562 return;
541 563
@@ -660,14 +682,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
660 spin_unlock(&ifmsh->mesh_preq_queue_lock); 682 spin_unlock(&ifmsh->mesh_preq_queue_lock);
661 683
662 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) 684 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
663 ieee80211_queue_work(sdata->local->hw.workqueue, &ifmsh->work); 685 ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
664 686
665 else if (time_before(jiffies, ifmsh->last_preq)) { 687 else if (time_before(jiffies, ifmsh->last_preq)) {
666 /* avoid long wait if did not send preqs for a long time 688 /* avoid long wait if did not send preqs for a long time
667 * and jiffies wrapped around 689 * and jiffies wrapped around
668 */ 690 */
669 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; 691 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
670 ieee80211_queue_work(sdata->local->hw.workqueue, &ifmsh->work); 692 ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
671 } else 693 } else
672 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + 694 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
673 min_preq_int_jiff(sdata)); 695 min_preq_int_jiff(sdata));
@@ -791,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
791 } 813 }
792 814
793 if (mpath->flags & MESH_PATH_ACTIVE) { 815 if (mpath->flags & MESH_PATH_ACTIVE) {
794 if (time_after(jiffies, mpath->exp_time - 816 if (time_after(jiffies, mpath->exp_time +
795 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) 817 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
796 && !memcmp(sdata->dev->dev_addr, hdr->addr4, 818 && !memcmp(sdata->dev->dev_addr, hdr->addr4,
797 ETH_ALEN) 819 ETH_ALEN)
@@ -810,10 +832,8 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
810 } 832 }
811 833
812 if (skb_queue_len(&mpath->frame_queue) >= 834 if (skb_queue_len(&mpath->frame_queue) >=
813 MESH_FRAME_QUEUE_LEN) { 835 MESH_FRAME_QUEUE_LEN)
814 skb_to_free = mpath->frame_queue.next; 836 skb_to_free = skb_dequeue(&mpath->frame_queue);
815 skb_unlink(skb_to_free, &mpath->frame_queue);
816 }
817 837
818 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 838 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
819 skb_queue_tail(&mpath->frame_queue, skb); 839 skb_queue_tail(&mpath->frame_queue, skb);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 04b9e4d61b8e..751c4d0e2b36 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -38,6 +38,71 @@ struct mpath_node {
38static struct mesh_table *mesh_paths; 38static struct mesh_table *mesh_paths;
39static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 39static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
40 40
41int mesh_paths_generation;
42static void __mesh_table_free(struct mesh_table *tbl)
43{
44 kfree(tbl->hash_buckets);
45 kfree(tbl->hashwlock);
46 kfree(tbl);
47}
48
49void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
50{
51 struct hlist_head *mesh_hash;
52 struct hlist_node *p, *q;
53 int i;
54
55 mesh_hash = tbl->hash_buckets;
56 for (i = 0; i <= tbl->hash_mask; i++) {
57 spin_lock(&tbl->hashwlock[i]);
58 hlist_for_each_safe(p, q, &mesh_hash[i]) {
59 tbl->free_node(p, free_leafs);
60 atomic_dec(&tbl->entries);
61 }
62 spin_unlock(&tbl->hashwlock[i]);
63 }
64 __mesh_table_free(tbl);
65}
66
67static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
68{
69 struct mesh_table *newtbl;
70 struct hlist_head *oldhash;
71 struct hlist_node *p, *q;
72 int i;
73
74 if (atomic_read(&tbl->entries)
75 < tbl->mean_chain_len * (tbl->hash_mask + 1))
76 goto endgrow;
77
78 newtbl = mesh_table_alloc(tbl->size_order + 1);
79 if (!newtbl)
80 goto endgrow;
81
82 newtbl->free_node = tbl->free_node;
83 newtbl->mean_chain_len = tbl->mean_chain_len;
84 newtbl->copy_node = tbl->copy_node;
85 atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
86
87 oldhash = tbl->hash_buckets;
88 for (i = 0; i <= tbl->hash_mask; i++)
89 hlist_for_each(p, &oldhash[i])
90 if (tbl->copy_node(p, newtbl) < 0)
91 goto errcopy;
92
93 return newtbl;
94
95errcopy:
96 for (i = 0; i <= newtbl->hash_mask; i++) {
97 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
98 tbl->free_node(p, 0);
99 }
100 __mesh_table_free(newtbl);
101endgrow:
102 return NULL;
103}
104
105
41/* This lock will have the grow table function as writer and add / delete nodes 106/* This lock will have the grow table function as writer and add / delete nodes
42 * as readers. When reading the table (i.e. doing lookups) we are well protected 107 * as readers. When reading the table (i.e. doing lookups) we are well protected
43 * by RCU 108 * by RCU
@@ -185,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
185 */ 250 */
186int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) 251int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
187{ 252{
253 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
254 struct ieee80211_local *local = sdata->local;
188 struct mesh_path *mpath, *new_mpath; 255 struct mesh_path *mpath, *new_mpath;
189 struct mpath_node *node, *new_node; 256 struct mpath_node *node, *new_node;
190 struct hlist_head *bucket; 257 struct hlist_head *bucket;
@@ -193,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
193 int err = 0; 260 int err = 0;
194 u32 hash_idx; 261 u32 hash_idx;
195 262
196 might_sleep();
197
198 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 263 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
199 /* never add ourselves as neighbours */ 264 /* never add ourselves as neighbours */
200 return -ENOTSUPP; 265 return -ENOTSUPP;
@@ -206,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
206 return -ENOSPC; 271 return -ENOSPC;
207 272
208 err = -ENOMEM; 273 err = -ENOMEM;
209 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 274 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
210 if (!new_mpath) 275 if (!new_mpath)
211 goto err_path_alloc; 276 goto err_path_alloc;
212 277
213 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 278 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
214 if (!new_node) 279 if (!new_node)
215 goto err_node_alloc; 280 goto err_node_alloc;
216 281
@@ -243,23 +308,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
243 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 308 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
244 grow = 1; 309 grow = 1;
245 310
311 mesh_paths_generation++;
312
246 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 313 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
247 read_unlock(&pathtbl_resize_lock); 314 read_unlock(&pathtbl_resize_lock);
248 if (grow) { 315 if (grow) {
249 struct mesh_table *oldtbl, *newtbl; 316 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
250 317 ieee80211_queue_work(&local->hw, &ifmsh->work);
251 write_lock(&pathtbl_resize_lock);
252 oldtbl = mesh_paths;
253 newtbl = mesh_table_grow(mesh_paths);
254 if (!newtbl) {
255 write_unlock(&pathtbl_resize_lock);
256 return 0;
257 }
258 rcu_assign_pointer(mesh_paths, newtbl);
259 write_unlock(&pathtbl_resize_lock);
260
261 synchronize_rcu();
262 mesh_table_free(oldtbl, false);
263 } 318 }
264 return 0; 319 return 0;
265 320
@@ -274,9 +329,46 @@ err_path_alloc:
274 return err; 329 return err;
275} 330}
276 331
332void mesh_mpath_table_grow(void)
333{
334 struct mesh_table *oldtbl, *newtbl;
335
336 write_lock(&pathtbl_resize_lock);
337 oldtbl = mesh_paths;
338 newtbl = mesh_table_grow(mesh_paths);
339 if (!newtbl) {
340 write_unlock(&pathtbl_resize_lock);
341 return;
342 }
343 rcu_assign_pointer(mesh_paths, newtbl);
344 write_unlock(&pathtbl_resize_lock);
345
346 synchronize_rcu();
347 mesh_table_free(oldtbl, false);
348}
349
350void mesh_mpp_table_grow(void)
351{
352 struct mesh_table *oldtbl, *newtbl;
353
354 write_lock(&pathtbl_resize_lock);
355 oldtbl = mpp_paths;
356 newtbl = mesh_table_grow(mpp_paths);
357 if (!newtbl) {
358 write_unlock(&pathtbl_resize_lock);
359 return;
360 }
361 rcu_assign_pointer(mpp_paths, newtbl);
362 write_unlock(&pathtbl_resize_lock);
363
364 synchronize_rcu();
365 mesh_table_free(oldtbl, false);
366}
277 367
278int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 368int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
279{ 369{
370 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
371 struct ieee80211_local *local = sdata->local;
280 struct mesh_path *mpath, *new_mpath; 372 struct mesh_path *mpath, *new_mpath;
281 struct mpath_node *node, *new_node; 373 struct mpath_node *node, *new_node;
282 struct hlist_head *bucket; 374 struct hlist_head *bucket;
@@ -285,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
285 int err = 0; 377 int err = 0;
286 u32 hash_idx; 378 u32 hash_idx;
287 379
288 might_sleep();
289
290 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 380 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
291 /* never add ourselves as neighbours */ 381 /* never add ourselves as neighbours */
292 return -ENOTSUPP; 382 return -ENOTSUPP;
@@ -295,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
295 return -ENOTSUPP; 385 return -ENOTSUPP;
296 386
297 err = -ENOMEM; 387 err = -ENOMEM;
298 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 388 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
299 if (!new_mpath) 389 if (!new_mpath)
300 goto err_path_alloc; 390 goto err_path_alloc;
301 391
302 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 392 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
303 if (!new_node) 393 if (!new_node)
304 goto err_node_alloc; 394 goto err_node_alloc;
305 395
@@ -333,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
333 spin_unlock(&mpp_paths->hashwlock[hash_idx]); 423 spin_unlock(&mpp_paths->hashwlock[hash_idx]);
334 read_unlock(&pathtbl_resize_lock); 424 read_unlock(&pathtbl_resize_lock);
335 if (grow) { 425 if (grow) {
336 struct mesh_table *oldtbl, *newtbl; 426 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
337 427 ieee80211_queue_work(&local->hw, &ifmsh->work);
338 write_lock(&pathtbl_resize_lock);
339 oldtbl = mpp_paths;
340 newtbl = mesh_table_grow(mpp_paths);
341 if (!newtbl) {
342 write_unlock(&pathtbl_resize_lock);
343 return 0;
344 }
345 rcu_assign_pointer(mpp_paths, newtbl);
346 write_unlock(&pathtbl_resize_lock);
347
348 synchronize_rcu();
349 mesh_table_free(oldtbl, false);
350 } 428 }
351 return 0; 429 return 0;
352 430
@@ -484,6 +562,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
484 562
485 err = -ENXIO; 563 err = -ENXIO;
486enddel: 564enddel:
565 mesh_paths_generation++;
487 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 566 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
488 read_unlock(&pathtbl_resize_lock); 567 read_unlock(&pathtbl_resize_lock);
489 return err; 568 return err;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index cb14253587f1..ffcbad75e09b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -409,7 +409,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
409 baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; 409 baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
410 if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { 410 if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
411 baseaddr += 4; 411 baseaddr += 4;
412 baselen -= 4; 412 baselen += 4;
413 } 413 }
414 ieee802_11_parse_elems(baseaddr, len - baselen, &elems); 414 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
415 if (!elems.peer_link) { 415 if (!elems.peer_link) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6d5a1ee0445f..97a278a2f48e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -880,10 +880,11 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
880} 880}
881 881
882static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, 882static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
883 struct ieee80211_bss *bss, 883 struct ieee80211_mgd_work *wk,
884 u32 bss_info_changed) 884 u32 bss_info_changed)
885{ 885{
886 struct ieee80211_local *local = sdata->local; 886 struct ieee80211_local *local = sdata->local;
887 struct ieee80211_bss *bss = wk->bss;
887 888
888 bss_info_changed |= BSS_CHANGED_ASSOC; 889 bss_info_changed |= BSS_CHANGED_ASSOC;
889 /* set timing information */ 890 /* set timing information */
@@ -896,6 +897,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
896 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 897 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
897 898
898 sdata->u.mgd.associated = bss; 899 sdata->u.mgd.associated = bss;
900 sdata->u.mgd.old_associate_work = wk;
899 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN); 901 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
900 902
901 /* just to be sure */ 903 /* just to be sure */
@@ -1010,7 +1012,8 @@ ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
1010 return RX_MGMT_NONE; 1012 return RX_MGMT_NONE;
1011} 1013}
1012 1014
1013static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata) 1015static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1016 bool deauth)
1014{ 1017{
1015 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1018 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1016 struct ieee80211_local *local = sdata->local; 1019 struct ieee80211_local *local = sdata->local;
@@ -1028,6 +1031,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
1028 ifmgd->associated = NULL; 1031 ifmgd->associated = NULL;
1029 memset(ifmgd->bssid, 0, ETH_ALEN); 1032 memset(ifmgd->bssid, 0, ETH_ALEN);
1030 1033
1034 if (deauth) {
1035 kfree(ifmgd->old_associate_work);
1036 ifmgd->old_associate_work = NULL;
1037 } else {
1038 struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
1039
1040 wk->state = IEEE80211_MGD_STATE_IDLE;
1041 list_add(&wk->list, &ifmgd->work_list);
1042 }
1043
1031 /* 1044 /*
1032 * we need to commit the associated = NULL change because the 1045 * we need to commit the associated = NULL change because the
1033 * scan code uses that to determine whether this iface should 1046 * scan code uses that to determine whether this iface should
@@ -1345,7 +1358,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1345 sdata->dev->name, bssid, reason_code); 1358 sdata->dev->name, bssid, reason_code);
1346 1359
1347 if (!wk) { 1360 if (!wk) {
1348 ieee80211_set_disassoc(sdata); 1361 ieee80211_set_disassoc(sdata, true);
1349 } else { 1362 } else {
1350 list_del(&wk->list); 1363 list_del(&wk->list);
1351 kfree(wk); 1364 kfree(wk);
@@ -1378,7 +1391,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1378 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", 1391 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
1379 sdata->dev->name, reason_code); 1392 sdata->dev->name, reason_code);
1380 1393
1381 ieee80211_set_disassoc(sdata); 1394 ieee80211_set_disassoc(sdata, false);
1382 return RX_MGMT_CFG80211_DISASSOC; 1395 return RX_MGMT_CFG80211_DISASSOC;
1383} 1396}
1384 1397
@@ -1581,7 +1594,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1581 * ieee80211_set_associated() will tell the driver */ 1594 * ieee80211_set_associated() will tell the driver */
1582 bss_conf->aid = aid; 1595 bss_conf->aid = aid;
1583 bss_conf->assoc_capability = capab_info; 1596 bss_conf->assoc_capability = capab_info;
1584 ieee80211_set_associated(sdata, wk->bss, changed); 1597 /* this will take ownership of wk */
1598 ieee80211_set_associated(sdata, wk, changed);
1585 1599
1586 /* 1600 /*
1587 * Start timer to probe the connection to the AP now. 1601 * Start timer to probe the connection to the AP now.
@@ -1590,7 +1604,6 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1590 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1604 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1591 mod_beacon_timer(sdata); 1605 mod_beacon_timer(sdata);
1592 1606
1593 kfree(wk);
1594 return RX_MGMT_CFG80211_ASSOC; 1607 return RX_MGMT_CFG80211_ASSOC;
1595} 1608}
1596 1609
@@ -2096,7 +2109,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2096 printk(KERN_DEBUG "No probe response from AP %pM" 2109 printk(KERN_DEBUG "No probe response from AP %pM"
2097 " after %dms, disconnecting.\n", 2110 " after %dms, disconnecting.\n",
2098 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 2111 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2099 ieee80211_set_disassoc(sdata); 2112 ieee80211_set_disassoc(sdata, true);
2100 mutex_unlock(&ifmgd->mtx); 2113 mutex_unlock(&ifmgd->mtx);
2101 /* 2114 /*
2102 * must be outside lock due to cfg80211, 2115 * must be outside lock due to cfg80211,
@@ -2110,25 +2123,9 @@ static void ieee80211_sta_work(struct work_struct *work)
2110 } 2123 }
2111 } 2124 }
2112 2125
2113 list_for_each_entry(wk, &ifmgd->work_list, list) {
2114 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2115 anybusy = true;
2116 break;
2117 }
2118 }
2119 2126
2120 ieee80211_recalc_idle(local); 2127 ieee80211_recalc_idle(local);
2121 2128
2122 if (!anybusy) {
2123 mutex_unlock(&ifmgd->mtx);
2124
2125 if (test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2126 ieee80211_queue_delayed_work(&local->hw,
2127 &local->scan_work,
2128 round_jiffies_relative(0));
2129 return;
2130 }
2131
2132 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) { 2129 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
2133 if (time_is_after_jiffies(wk->timeout)) { 2130 if (time_is_after_jiffies(wk->timeout)) {
2134 /* 2131 /*
@@ -2174,6 +2171,18 @@ static void ieee80211_sta_work(struct work_struct *work)
2174 } 2171 }
2175 } 2172 }
2176 2173
2174 list_for_each_entry(wk, &ifmgd->work_list, list) {
2175 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2176 anybusy = true;
2177 break;
2178 }
2179 }
2180 if (!anybusy &&
2181 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2182 ieee80211_queue_delayed_work(&local->hw,
2183 &local->scan_work,
2184 round_jiffies_relative(0));
2185
2177 mutex_unlock(&ifmgd->mtx); 2186 mutex_unlock(&ifmgd->mtx);
2178 2187
2179 list_for_each_entry_safe(wk, tmp, &free_work, list) { 2188 list_for_each_entry_safe(wk, tmp, &free_work, list) {
@@ -2500,7 +2509,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2500 2509
2501 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 2510 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
2502 bssid = req->bss->bssid; 2511 bssid = req->bss->bssid;
2503 ieee80211_set_disassoc(sdata); 2512 ieee80211_set_disassoc(sdata, true);
2504 } else list_for_each_entry(wk, &ifmgd->work_list, list) { 2513 } else list_for_each_entry(wk, &ifmgd->work_list, list) {
2505 if (&wk->bss->cbss == req->bss) { 2514 if (&wk->bss->cbss == req->bss) {
2506 bssid = req->bss->bssid; 2515 bssid = req->bss->bssid;
@@ -2552,7 +2561,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2552 return -ENOLINK; 2561 return -ENOLINK;
2553 } 2562 }
2554 2563
2555 ieee80211_set_disassoc(sdata); 2564 ieee80211_set_disassoc(sdata, false);
2556 2565
2557 mutex_unlock(&ifmgd->mtx); 2566 mutex_unlock(&ifmgd->mtx);
2558 2567
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index a5d2f1fb4417..e535f1c988fe 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -107,17 +107,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
107 } 107 }
108 108
109 /* stop hardware - this must stop RX */ 109 /* stop hardware - this must stop RX */
110 if (local->open_count) { 110 if (local->open_count)
111 ieee80211_led_radio(local, false); 111 ieee80211_stop_device(local);
112 drv_stop(local);
113 }
114
115 /*
116 * flush again, in case driver queued work -- it
117 * shouldn't be doing (or cancel everything in the
118 * stop callback) that but better safe than sorry.
119 */
120 flush_workqueue(local->workqueue);
121 112
122 local->suspended = true; 113 local->suspended = true;
123 /* need suspended to be visible before quiescing is false */ 114 /* need suspended to be visible before quiescing is false */
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 869fe0ef951d..38bf4168fc3a 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -33,7 +33,6 @@ struct minstrel_rate {
33 33
34 /* per-rate throughput */ 34 /* per-rate throughput */
35 u32 cur_tp; 35 u32 cur_tp;
36 u32 throughput;
37 36
38 u64 succ_hist; 37 u64 succ_hist;
39 u64 att_hist; 38 u64 att_hist;
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 98f480708050..a715d9454f64 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -83,7 +83,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
83 p += sprintf(p, "%3u%s", mr->bitrate / 2, 83 p += sprintf(p, "%3u%s", mr->bitrate / 2,
84 (mr->bitrate & 1 ? ".5" : " ")); 84 (mr->bitrate & 1 ? ".5" : " "));
85 85
86 tp = ((mr->cur_tp * 96) / 18000) >> 10; 86 tp = mr->cur_tp / ((18000 << 10) / 96);
87 prob = mr->cur_prob / 18; 87 prob = mr->cur_prob / 18;
88 eprob = mr->probability / 18; 88 eprob = mr->probability / 18;
89 89
@@ -139,7 +139,7 @@ minstrel_stats_release(struct inode *inode, struct file *file)
139 return 0; 139 return 0;
140} 140}
141 141
142static struct file_operations minstrel_stat_fops = { 142static const struct file_operations minstrel_stat_fops = {
143 .owner = THIS_MODULE, 143 .owner = THIS_MODULE,
144 .open = minstrel_stats_open, 144 .open = minstrel_stats_open,
145 .read = minstrel_stats_read, 145 .read = minstrel_stats_read,
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 8c053be9dc24..699d3ed869c4 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -169,19 +169,9 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
169 * still a good measurement and copy it. */ 169 * still a good measurement and copy it. */
170 if (unlikely(spinfo->tx_num_xmit == 0)) 170 if (unlikely(spinfo->tx_num_xmit == 0))
171 pf = spinfo->last_pf; 171 pf = spinfo->last_pf;
172 else { 172 else
173 /* XXX: BAD HACK!!! */
174 struct sta_info *si = container_of(sta, struct sta_info, sta);
175
176 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; 173 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
177 174
178 if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100)
179 mesh_plink_broken(si);
180 pf <<= RC_PID_ARITH_SHIFT;
181 si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
182 >> RC_PID_ARITH_SHIFT;
183 }
184
185 spinfo->tx_num_xmit = 0; 175 spinfo->tx_num_xmit = 0;
186 spinfo->tx_num_failed = 0; 176 spinfo->tx_num_failed = 0;
187 177
@@ -311,7 +301,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
311 struct rc_pid_sta_info *spinfo = priv_sta; 301 struct rc_pid_sta_info *spinfo = priv_sta;
312 struct rc_pid_info *pinfo = priv; 302 struct rc_pid_info *pinfo = priv;
313 struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 303 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
314 struct sta_info *si;
315 int i, j, tmp; 304 int i, j, tmp;
316 bool s; 305 bool s;
317 306
@@ -348,9 +337,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
348 } 337 }
349 338
350 spinfo->txrate_idx = rate_lowest_index(sband, sta); 339 spinfo->txrate_idx = rate_lowest_index(sband, sta);
351 /* HACK */
352 si = container_of(sta, struct sta_info, sta);
353 si->fail_avg = 0;
354} 340}
355 341
356static void *rate_control_pid_alloc(struct ieee80211_hw *hw, 342static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index a08a9b530347..a59043fbb0ff 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -198,7 +198,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
198 198
199#undef RC_PID_PRINT_BUF_SIZE 199#undef RC_PID_PRINT_BUF_SIZE
200 200
201static struct file_operations rc_pid_fop_events = { 201static const struct file_operations rc_pid_fop_events = {
202 .owner = THIS_MODULE, 202 .owner = THIS_MODULE,
203 .read = rate_control_pid_events_read, 203 .read = rate_control_pid_events_read,
204 .poll = rate_control_pid_events_poll, 204 .poll = rate_control_pid_events_poll,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 25a669c86e14..c01588f9d453 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -489,12 +489,21 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
489{ 489{
490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
491 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 491 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
492 char *dev_addr = rx->dev->dev_addr;
492 493
493 if (ieee80211_is_data(hdr->frame_control)) { 494 if (ieee80211_is_data(hdr->frame_control)) {
494 if (!ieee80211_has_a4(hdr->frame_control)) 495 if (is_multicast_ether_addr(hdr->addr1)) {
495 return RX_DROP_MONITOR; 496 if (ieee80211_has_tods(hdr->frame_control) ||
496 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) 497 !ieee80211_has_fromds(hdr->frame_control))
497 return RX_DROP_MONITOR; 498 return RX_DROP_MONITOR;
499 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
500 return RX_DROP_MONITOR;
501 } else {
502 if (!ieee80211_has_a4(hdr->frame_control))
503 return RX_DROP_MONITOR;
504 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
505 return RX_DROP_MONITOR;
506 }
498 } 507 }
499 508
500 /* If there is not an established peer link and this is not a peer link 509 /* If there is not an established peer link and this is not a peer link
@@ -527,7 +536,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
527 536
528 if (ieee80211_is_data(hdr->frame_control) && 537 if (ieee80211_is_data(hdr->frame_control) &&
529 is_multicast_ether_addr(hdr->addr1) && 538 is_multicast_ether_addr(hdr->addr1) &&
530 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata)) 539 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
531 return RX_DROP_MONITOR; 540 return RX_DROP_MONITOR;
532#undef msh_h_get 541#undef msh_h_get
533 542
@@ -1495,7 +1504,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1495 /* illegal frame */ 1504 /* illegal frame */
1496 return RX_DROP_MONITOR; 1505 return RX_DROP_MONITOR;
1497 1506
1498 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){ 1507 if (!is_multicast_ether_addr(hdr->addr1) &&
1508 (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) {
1499 struct mesh_path *mppath; 1509 struct mesh_path *mppath;
1500 1510
1501 rcu_read_lock(); 1511 rcu_read_lock();
@@ -1512,7 +1522,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1512 rcu_read_unlock(); 1522 rcu_read_unlock();
1513 } 1523 }
1514 1524
1515 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) 1525 /* Frame has reached destination. Don't forward */
1526 if (!is_multicast_ether_addr(hdr->addr1) &&
1527 compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1516 return RX_CONTINUE; 1528 return RX_CONTINUE;
1517 1529
1518 mesh_hdr->ttl--; 1530 mesh_hdr->ttl--;
@@ -1532,27 +1544,32 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1532 rx->dev->name); 1544 rx->dev->name);
1533 1545
1534 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1546 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1535 /*
1536 * Save TA to addr1 to send TA a path error if a
1537 * suitable next hop is not found
1538 */
1539 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1540 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); 1547 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1541 info = IEEE80211_SKB_CB(fwd_skb); 1548 info = IEEE80211_SKB_CB(fwd_skb);
1542 memset(info, 0, sizeof(*info)); 1549 memset(info, 0, sizeof(*info));
1543 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1550 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1544 info->control.vif = &rx->sdata->vif; 1551 info->control.vif = &rx->sdata->vif;
1545 ieee80211_select_queue(local, fwd_skb); 1552 ieee80211_select_queue(local, fwd_skb);
1546 if (is_multicast_ether_addr(fwd_hdr->addr3)) 1553 if (is_multicast_ether_addr(fwd_hdr->addr1))
1547 memcpy(fwd_hdr->addr1, fwd_hdr->addr3, 1554 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1548 ETH_ALEN); 1555 fwded_mcast);
1549 else { 1556 else {
1550 int err = mesh_nexthop_lookup(fwd_skb, sdata); 1557 int err;
1558 /*
1559 * Save TA to addr1 to send TA a path error if a
1560 * suitable next hop is not found
1561 */
1562 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1563 ETH_ALEN);
1564 err = mesh_nexthop_lookup(fwd_skb, sdata);
1551 /* Failed to immediately resolve next hop: 1565 /* Failed to immediately resolve next hop:
1552 * fwded frame was dropped or will be added 1566 * fwded frame was dropped or will be added
1553 * later to the pending skb queue. */ 1567 * later to the pending skb queue. */
1554 if (err) 1568 if (err)
1555 return RX_DROP_MONITOR; 1569 return RX_DROP_MONITOR;
1570
1571 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1572 fwded_unicast);
1556 } 1573 }
1557 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1574 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1558 fwded_frames); 1575 fwded_frames);
@@ -1560,7 +1577,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1560 } 1577 }
1561 } 1578 }
1562 1579
1563 if (is_multicast_ether_addr(hdr->addr3) || 1580 if (is_multicast_ether_addr(hdr->addr1) ||
1564 rx->dev->flags & IFF_PROMISC) 1581 rx->dev->flags & IFF_PROMISC)
1565 return RX_CONTINUE; 1582 return RX_CONTINUE;
1566 else 1583 else
@@ -2423,24 +2440,20 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2423 * This is the receive path handler. It is called by a low level driver when an 2440 * This is the receive path handler. It is called by a low level driver when an
2424 * 802.11 MPDU is received from the hardware. 2441 * 802.11 MPDU is received from the hardware.
2425 */ 2442 */
2426void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 2443void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2427{ 2444{
2428 struct ieee80211_local *local = hw_to_local(hw); 2445 struct ieee80211_local *local = hw_to_local(hw);
2429 struct ieee80211_rate *rate = NULL; 2446 struct ieee80211_rate *rate = NULL;
2430 struct ieee80211_supported_band *sband; 2447 struct ieee80211_supported_band *sband;
2431 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2448 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2432 2449
2433 if (status->band < 0 || 2450 if (WARN_ON(status->band < 0 ||
2434 status->band >= IEEE80211_NUM_BANDS) { 2451 status->band >= IEEE80211_NUM_BANDS))
2435 WARN_ON(1); 2452 goto drop;
2436 return;
2437 }
2438 2453
2439 sband = local->hw.wiphy->bands[status->band]; 2454 sband = local->hw.wiphy->bands[status->band];
2440 if (!sband) { 2455 if (WARN_ON(!sband))
2441 WARN_ON(1); 2456 goto drop;
2442 return;
2443 }
2444 2457
2445 /* 2458 /*
2446 * If we're suspending, it is possible although not too likely 2459 * If we're suspending, it is possible although not too likely
@@ -2449,16 +2462,21 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2449 * that might, for example, cause stations to be added or other 2462 * that might, for example, cause stations to be added or other
2450 * driver callbacks be invoked. 2463 * driver callbacks be invoked.
2451 */ 2464 */
2452 if (unlikely(local->quiescing || local->suspended)) { 2465 if (unlikely(local->quiescing || local->suspended))
2453 kfree_skb(skb); 2466 goto drop;
2454 return; 2467
2455 } 2468 /*
2469 * The same happens when we're not even started,
2470 * but that's worth a warning.
2471 */
2472 if (WARN_ON(!local->started))
2473 goto drop;
2456 2474
2457 if (status->flag & RX_FLAG_HT) { 2475 if (status->flag & RX_FLAG_HT) {
2458 /* rate_idx is MCS index */ 2476 /* rate_idx is MCS index */
2459 if (WARN_ON(status->rate_idx < 0 || 2477 if (WARN_ON(status->rate_idx < 0 ||
2460 status->rate_idx >= 76)) 2478 status->rate_idx >= 76))
2461 return; 2479 goto drop;
2462 /* HT rates are not in the table - use the highest legacy rate 2480 /* HT rates are not in the table - use the highest legacy rate
2463 * for now since other parts of mac80211 may not yet be fully 2481 * for now since other parts of mac80211 may not yet be fully
2464 * MCS aware. */ 2482 * MCS aware. */
@@ -2466,7 +2484,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2466 } else { 2484 } else {
2467 if (WARN_ON(status->rate_idx < 0 || 2485 if (WARN_ON(status->rate_idx < 0 ||
2468 status->rate_idx >= sband->n_bitrates)) 2486 status->rate_idx >= sband->n_bitrates))
2469 return; 2487 goto drop;
2470 rate = &sband->bitrates[status->rate_idx]; 2488 rate = &sband->bitrates[status->rate_idx];
2471 } 2489 }
2472 2490
@@ -2505,8 +2523,12 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2505 __ieee80211_rx_handle_packet(hw, skb, rate); 2523 __ieee80211_rx_handle_packet(hw, skb, rate);
2506 2524
2507 rcu_read_unlock(); 2525 rcu_read_unlock();
2526
2527 return;
2528 drop:
2529 kfree_skb(skb);
2508} 2530}
2509EXPORT_SYMBOL(__ieee80211_rx); 2531EXPORT_SYMBOL(ieee80211_rx);
2510 2532
2511/* This is a version of the rx handler that can be called from hard irq 2533/* This is a version of the rx handler that can be called from hard irq
2512 * context. Post the skb on the queue and schedule the tasklet */ 2534 * context. Post the skb on the queue and schedule the tasklet */
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 244f53f3c8b4..039901109fa1 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -277,9 +277,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
277 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) 277 if (test_bit(SCAN_HW_SCANNING, &local->scanning))
278 ieee80211_restore_scan_ies(local); 278 ieee80211_restore_scan_ies(local);
279 279
280 if (local->scan_req != &local->int_scan_req) 280 if (local->scan_req != local->int_scan_req)
281 cfg80211_scan_done(local->scan_req, aborted); 281 cfg80211_scan_done(local->scan_req, aborted);
282 local->scan_req = NULL; 282 local->scan_req = NULL;
283 local->scan_sdata = NULL;
283 284
284 was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); 285 was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
285 local->scanning = 0; 286 local->scanning = 0;
@@ -292,13 +293,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
292 if (was_hw_scan) 293 if (was_hw_scan)
293 goto done; 294 goto done;
294 295
295 spin_lock_bh(&local->filter_lock); 296 ieee80211_configure_filter(local);
296 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
297 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
298 &local->filter_flags,
299 local->mc_count,
300 local->mc_list);
301 spin_unlock_bh(&local->filter_lock);
302 297
303 drv_sw_scan_complete(local); 298 drv_sw_scan_complete(local);
304 299
@@ -376,13 +371,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
376 local->next_scan_state = SCAN_DECISION; 371 local->next_scan_state = SCAN_DECISION;
377 local->scan_channel_idx = 0; 372 local->scan_channel_idx = 0;
378 373
379 spin_lock_bh(&local->filter_lock); 374 ieee80211_configure_filter(local);
380 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
381 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
382 &local->filter_flags,
383 local->mc_count,
384 local->mc_list);
385 spin_unlock_bh(&local->filter_lock);
386 375
387 /* TODO: start scan as soon as all nullfunc frames are ACKed */ 376 /* TODO: start scan as soon as all nullfunc frames are ACKed */
388 ieee80211_queue_delayed_work(&local->hw, 377 ieee80211_queue_delayed_work(&local->hw,
@@ -423,7 +412,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
423 local->scan_req = req; 412 local->scan_req = req;
424 local->scan_sdata = sdata; 413 local->scan_sdata = sdata;
425 414
426 if (req != &local->int_scan_req && 415 if (req != local->int_scan_req &&
427 sdata->vif.type == NL80211_IFTYPE_STATION && 416 sdata->vif.type == NL80211_IFTYPE_STATION &&
428 !list_empty(&ifmgd->work_list)) { 417 !list_empty(&ifmgd->work_list)) {
429 /* actually wait for the work it's doing to finish/time out */ 418 /* actually wait for the work it's doing to finish/time out */
@@ -672,6 +661,7 @@ void ieee80211_scan_work(struct work_struct *work)
672 int rc; 661 int rc;
673 662
674 local->scan_req = NULL; 663 local->scan_req = NULL;
664 local->scan_sdata = NULL;
675 665
676 rc = __ieee80211_start_scan(sdata, req); 666 rc = __ieee80211_start_scan(sdata, req);
677 mutex_unlock(&local->scan_mtx); 667 mutex_unlock(&local->scan_mtx);
@@ -743,10 +733,10 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
743 if (local->scan_req) 733 if (local->scan_req)
744 goto unlock; 734 goto unlock;
745 735
746 memcpy(local->int_scan_req.ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); 736 memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
747 local->int_scan_req.ssids[0].ssid_len = ssid_len; 737 local->int_scan_req->ssids[0].ssid_len = ssid_len;
748 738
749 ret = __ieee80211_start_scan(sdata, &sdata->local->int_scan_req); 739 ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
750 unlock: 740 unlock:
751 mutex_unlock(&local->scan_mtx); 741 mutex_unlock(&local->scan_mtx);
752 return ret; 742 return ret;
@@ -754,7 +744,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
754 744
755void ieee80211_scan_cancel(struct ieee80211_local *local) 745void ieee80211_scan_cancel(struct ieee80211_local *local)
756{ 746{
757 bool swscan; 747 bool abortscan;
758 748
759 cancel_delayed_work_sync(&local->scan_work); 749 cancel_delayed_work_sync(&local->scan_work);
760 750
@@ -763,9 +753,10 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
763 * queued -- mostly at suspend under RTNL. 753 * queued -- mostly at suspend under RTNL.
764 */ 754 */
765 mutex_lock(&local->scan_mtx); 755 mutex_lock(&local->scan_mtx);
766 swscan = test_bit(SCAN_SW_SCANNING, &local->scanning); 756 abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
757 (!local->scanning && local->scan_req);
767 mutex_unlock(&local->scan_mtx); 758 mutex_unlock(&local->scan_mtx);
768 759
769 if (swscan) 760 if (abortscan)
770 ieee80211_scan_completed(&local->hw, true); 761 ieee80211_scan_completed(&local->hw, true);
771} 762}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a360bceeba59..eec001491e66 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -349,6 +349,7 @@ int sta_info_insert(struct sta_info *sta)
349 goto out_free; 349 goto out_free;
350 } 350 }
351 list_add(&sta->list, &local->sta_list); 351 list_add(&sta->list, &local->sta_list);
352 local->sta_generation++;
352 local->num_sta++; 353 local->num_sta++;
353 sta_info_hash_add(local, sta); 354 sta_info_hash_add(local, sta);
354 355
@@ -485,6 +486,7 @@ static void __sta_info_unlink(struct sta_info **sta)
485 } 486 }
486 487
487 local->num_sta--; 488 local->num_sta--;
489 local->sta_generation++;
488 490
489 if (local->ops->sta_notify) { 491 if (local->ops->sta_notify) {
490 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 492 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 7cffaa046b33..5143d203256b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -317,30 +317,30 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
317 if (!atomic_read(&tx->sdata->bss->num_sta_ps)) 317 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
318 return TX_CONTINUE; 318 return TX_CONTINUE;
319 319
320 /* buffered in hardware */
321 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) {
322 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
323
324 return TX_CONTINUE;
325 }
326
320 /* buffered in mac80211 */ 327 /* buffered in mac80211 */
321 if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { 328 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
322 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 329 purge_old_ps_buffers(tx->local);
323 purge_old_ps_buffers(tx->local); 330
324 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= 331 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
325 AP_MAX_BC_BUFFER) {
326#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 332#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
327 if (net_ratelimit()) { 333 if (net_ratelimit())
328 printk(KERN_DEBUG "%s: BC TX buffer full - " 334 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
329 "dropping the oldest frame\n", 335 tx->dev->name);
330 tx->dev->name);
331 }
332#endif 336#endif
333 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 337 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
334 } else 338 } else
335 tx->local->total_ps_buffered++; 339 tx->local->total_ps_buffered++;
336 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
337 return TX_QUEUED;
338 }
339 340
340 /* buffered in hardware */ 341 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
341 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
342 342
343 return TX_CONTINUE; 343 return TX_QUEUED;
344} 344}
345 345
346static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, 346static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
@@ -700,7 +700,6 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
700 /* for pure STA mode without beacons, we can do it */ 700 /* for pure STA mode without beacons, we can do it */
701 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); 701 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
702 tx->sdata->sequence_number += 0x10; 702 tx->sdata->sequence_number += 0x10;
703 tx->sdata->sequence_number &= IEEE80211_SCTL_SEQ;
704 return TX_CONTINUE; 703 return TX_CONTINUE;
705 } 704 }
706 705
@@ -844,6 +843,23 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
844} 843}
845 844
846static ieee80211_tx_result debug_noinline 845static ieee80211_tx_result debug_noinline
846ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
847{
848 struct sk_buff *skb = tx->skb;
849
850 if (!tx->sta)
851 return TX_CONTINUE;
852
853 tx->sta->tx_packets++;
854 do {
855 tx->sta->tx_fragments++;
856 tx->sta->tx_bytes += skb->len;
857 } while ((skb = skb->next));
858
859 return TX_CONTINUE;
860}
861
862static ieee80211_tx_result debug_noinline
847ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) 863ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
848{ 864{
849 if (!tx->key) 865 if (!tx->key)
@@ -887,23 +903,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
887 return TX_CONTINUE; 903 return TX_CONTINUE;
888} 904}
889 905
890static ieee80211_tx_result debug_noinline
891ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
892{
893 struct sk_buff *skb = tx->skb;
894
895 if (!tx->sta)
896 return TX_CONTINUE;
897
898 tx->sta->tx_packets++;
899 do {
900 tx->sta->tx_fragments++;
901 tx->sta->tx_bytes += skb->len;
902 } while ((skb = skb->next));
903
904 return TX_CONTINUE;
905}
906
907/* actual transmit path */ 906/* actual transmit path */
908 907
909/* 908/*
@@ -1154,6 +1153,9 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1154 next = skb->next; 1153 next = skb->next;
1155 len = skb->len; 1154 len = skb->len;
1156 1155
1156 if (next)
1157 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
1158
1157 sdata = vif_to_sdata(info->control.vif); 1159 sdata = vif_to_sdata(info->control.vif);
1158 1160
1159 switch (sdata->vif.type) { 1161 switch (sdata->vif.type) {
@@ -1210,9 +1212,9 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1210 CALL_TXH(ieee80211_tx_h_sequence) 1212 CALL_TXH(ieee80211_tx_h_sequence)
1211 CALL_TXH(ieee80211_tx_h_fragment) 1213 CALL_TXH(ieee80211_tx_h_fragment)
1212 /* handlers after fragment must be aware of tx info fragmentation! */ 1214 /* handlers after fragment must be aware of tx info fragmentation! */
1215 CALL_TXH(ieee80211_tx_h_stats)
1213 CALL_TXH(ieee80211_tx_h_encrypt) 1216 CALL_TXH(ieee80211_tx_h_encrypt)
1214 CALL_TXH(ieee80211_tx_h_calculate_duration) 1217 CALL_TXH(ieee80211_tx_h_calculate_duration)
1215 CALL_TXH(ieee80211_tx_h_stats)
1216#undef CALL_TXH 1218#undef CALL_TXH
1217 1219
1218 txh_done: 1220 txh_done:
@@ -1410,16 +1412,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1410 1412
1411 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 1413 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1412 1414
1413 if (ieee80211_vif_is_mesh(&sdata->vif) && 1415 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1414 ieee80211_is_data(hdr->frame_control)) {
1415 if (is_multicast_ether_addr(hdr->addr3))
1416 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1417 else
1418 if (mesh_nexthop_lookup(skb, sdata)) {
1419 dev_put(sdata->dev);
1420 return;
1421 }
1422 } else if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1423 int hdrlen; 1416 int hdrlen;
1424 u16 len_rthdr; 1417 u16 len_rthdr;
1425 1418
@@ -1476,13 +1469,22 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1476 1469
1477 info->control.vif = &sdata->vif; 1470 info->control.vif = &sdata->vif;
1478 1471
1472 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1473 ieee80211_is_data(hdr->frame_control) &&
1474 !is_multicast_ether_addr(hdr->addr1))
1475 if (mesh_nexthop_lookup(skb, sdata)) {
1476 /* skb queued: don't free */
1477 dev_put(sdata->dev);
1478 return;
1479 }
1480
1479 ieee80211_select_queue(local, skb); 1481 ieee80211_select_queue(local, skb);
1480 ieee80211_tx(sdata, skb, false); 1482 ieee80211_tx(sdata, skb, false);
1481 dev_put(sdata->dev); 1483 dev_put(sdata->dev);
1482} 1484}
1483 1485
1484int ieee80211_monitor_start_xmit(struct sk_buff *skb, 1486netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1485 struct net_device *dev) 1487 struct net_device *dev)
1486{ 1488{
1487 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1489 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1488 struct ieee80211_channel *chan = local->hw.conf.channel; 1490 struct ieee80211_channel *chan = local->hw.conf.channel;
@@ -1566,8 +1568,8 @@ fail:
1566 * encapsulated packet will then be passed to master interface, wlan#.11, for 1568 * encapsulated packet will then be passed to master interface, wlan#.11, for
1567 * transmission (through low-level driver). 1569 * transmission (through low-level driver).
1568 */ 1570 */
1569int ieee80211_subif_start_xmit(struct sk_buff *skb, 1571netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1570 struct net_device *dev) 1572 struct net_device *dev)
1571{ 1573{
1572 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1574 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1573 struct ieee80211_local *local = sdata->local; 1575 struct ieee80211_local *local = sdata->local;
@@ -1617,52 +1619,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1617 break; 1619 break;
1618#ifdef CONFIG_MAC80211_MESH 1620#ifdef CONFIG_MAC80211_MESH
1619 case NL80211_IFTYPE_MESH_POINT: 1621 case NL80211_IFTYPE_MESH_POINT:
1620 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1621 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { 1622 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1622 /* Do not send frames with mesh_ttl == 0 */ 1623 /* Do not send frames with mesh_ttl == 0 */
1623 sdata->u.mesh.mshstats.dropped_frames_ttl++; 1624 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1624 ret = NETDEV_TX_OK; 1625 ret = NETDEV_TX_OK;
1625 goto fail; 1626 goto fail;
1626 } 1627 }
1627 memset(&mesh_hdr, 0, sizeof(mesh_hdr));
1628 1628
1629 if (compare_ether_addr(dev->dev_addr, 1629 if (compare_ether_addr(dev->dev_addr,
1630 skb->data + ETH_ALEN) == 0) { 1630 skb->data + ETH_ALEN) == 0) {
1631 /* RA TA DA SA */ 1631 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1632 memset(hdr.addr1, 0, ETH_ALEN); 1632 skb->data, skb->data + ETH_ALEN);
1633 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1633 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1634 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1634 sdata, NULL, NULL, NULL);
1635 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1636 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata);
1637 } else { 1635 } else {
1638 /* packet from other interface */ 1636 /* packet from other interface */
1639 struct mesh_path *mppath; 1637 struct mesh_path *mppath;
1638 int is_mesh_mcast = 1;
1639 char *mesh_da;
1640 1640
1641 memset(hdr.addr1, 0, ETH_ALEN); 1641 rcu_read_lock();
1642 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1643 memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN);
1644
1645 if (is_multicast_ether_addr(skb->data)) 1642 if (is_multicast_ether_addr(skb->data))
1646 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1643 /* DA TA mSA AE:SA */
1644 mesh_da = skb->data;
1647 else { 1645 else {
1648 rcu_read_lock();
1649 mppath = mpp_path_lookup(skb->data, sdata); 1646 mppath = mpp_path_lookup(skb->data, sdata);
1650 if (mppath) 1647 if (mppath) {
1651 memcpy(hdr.addr3, mppath->mpp, ETH_ALEN); 1648 /* RA TA mDA mSA AE:DA SA */
1652 else 1649 mesh_da = mppath->mpp;
1653 memset(hdr.addr3, 0xff, ETH_ALEN); 1650 is_mesh_mcast = 0;
1654 rcu_read_unlock(); 1651 } else
1652 /* DA TA mSA AE:SA */
1653 mesh_da = dev->broadcast;
1655 } 1654 }
1655 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1656 mesh_da, dev->dev_addr);
1657 rcu_read_unlock();
1658 if (is_mesh_mcast)
1659 meshhdrlen =
1660 ieee80211_new_mesh_header(&mesh_hdr,
1661 sdata,
1662 skb->data + ETH_ALEN,
1663 NULL,
1664 NULL);
1665 else
1666 meshhdrlen =
1667 ieee80211_new_mesh_header(&mesh_hdr,
1668 sdata,
1669 NULL,
1670 skb->data,
1671 skb->data + ETH_ALEN);
1656 1672
1657 mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6;
1658 mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
1659 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum);
1660 memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN);
1661 memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN);
1662 sdata->u.mesh.mesh_seqnum++;
1663 meshhdrlen = 18;
1664 } 1673 }
1665 hdrlen = 30;
1666 break; 1674 break;
1667#endif 1675#endif
1668 case NL80211_IFTYPE_STATION: 1676 case NL80211_IFTYPE_STATION:
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e55d57f559ec..dd6564321369 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1007,6 +1007,16 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1007 return supp_rates; 1007 return supp_rates;
1008} 1008}
1009 1009
1010void ieee80211_stop_device(struct ieee80211_local *local)
1011{
1012 ieee80211_led_radio(local, false);
1013
1014 cancel_work_sync(&local->reconfig_filter);
1015 drv_stop(local);
1016
1017 flush_workqueue(local->workqueue);
1018}
1019
1010int ieee80211_reconfig(struct ieee80211_local *local) 1020int ieee80211_reconfig(struct ieee80211_local *local)
1011{ 1021{
1012 struct ieee80211_hw *hw = &local->hw; 1022 struct ieee80211_hw *hw = &local->hw;
@@ -1076,9 +1086,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1076 /* reconfigure hardware */ 1086 /* reconfigure hardware */
1077 ieee80211_hw_config(local, ~0); 1087 ieee80211_hw_config(local, ~0);
1078 1088
1079 spin_lock_bh(&local->filter_lock);
1080 ieee80211_configure_filter(local); 1089 ieee80211_configure_filter(local);
1081 spin_unlock_bh(&local->filter_lock);
1082 1090
1083 /* Finally also reconfigure all the BSS information */ 1091 /* Finally also reconfigure all the BSS information */
1084 list_for_each_entry(sdata, &local->interfaces, list) { 1092 list_for_each_entry(sdata, &local->interfaces, list) {
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 43f5676b1af4..d80b8192e0d4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -74,7 +74,7 @@ static unsigned int
74xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par) 74xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
75{ 75{
76 const struct xt_rateest_target_info *info = par->targinfo; 76 const struct xt_rateest_target_info *info = par->targinfo;
77 struct gnet_stats_basic *stats = &info->est->bstats; 77 struct gnet_stats_basic_packed *stats = &info->est->bstats;
78 78
79 spin_lock_bh(&info->est->lock); 79 spin_lock_bh(&info->est->lock);
80 stats->bytes += skb->len; 80 stats->bytes += skb->len;
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 98fc190e8f0e..390b7d09fe51 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -52,7 +52,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
52 52
53 q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); 53 q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
54 if (q->master == NULL) 54 if (q->master == NULL)
55 return -ENOMEM; 55 return false;
56 56
57 q->master->quota = q->quota; 57 q->master->quota = q->quota;
58 return true; 58 return true;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 575c64341508..66f6ba0bab11 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -136,7 +136,7 @@ int genl_register_mc_group(struct genl_family *family,
136{ 136{
137 int id; 137 int id;
138 unsigned long *new_groups; 138 unsigned long *new_groups;
139 int err; 139 int err = 0;
140 140
141 BUG_ON(grp->name[0] == '\0'); 141 BUG_ON(grp->name[0] == '\0');
142 142
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ce51ce012cda..ce1a34b99c23 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -847,6 +847,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
847 sax->fsa_ax25.sax25_family = AF_NETROM; 847 sax->fsa_ax25.sax25_family = AF_NETROM;
848 sax->fsa_ax25.sax25_ndigis = 1; 848 sax->fsa_ax25.sax25_ndigis = 1;
849 sax->fsa_ax25.sax25_call = nr->user_addr; 849 sax->fsa_ax25.sax25_call = nr->user_addr;
850 memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
850 sax->fsa_digipeater[0] = nr->dest_addr; 851 sax->fsa_digipeater[0] = nr->dest_addr;
851 *uaddr_len = sizeof(struct full_sockaddr_ax25); 852 *uaddr_len = sizeof(struct full_sockaddr_ax25);
852 } else { 853 } else {
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index c7b7838a0519..7aa11b01b2e2 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -169,7 +169,7 @@ static int nr_close(struct net_device *dev)
169 return 0; 169 return 0;
170} 170}
171 171
172static int nr_xmit(struct sk_buff *skb, struct net_device *dev) 172static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
173{ 173{
174 struct net_device_stats *stats = &dev->stats; 174 struct net_device_stats *stats = &dev->stats;
175 unsigned int len = skb->len; 175 unsigned int len = skb->len;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e943c16552a2..4eb1ac9a7679 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -630,23 +630,23 @@ out:
630 return dev; 630 return dev;
631} 631}
632 632
633static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters) 633static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
634 ax25_address *digipeaters)
634{ 635{
635 static ax25_digi ax25_digi;
636 int i; 636 int i;
637 637
638 if (ndigis == 0) 638 if (ndigis == 0)
639 return NULL; 639 return NULL;
640 640
641 for (i = 0; i < ndigis; i++) { 641 for (i = 0; i < ndigis; i++) {
642 ax25_digi.calls[i] = digipeaters[i]; 642 digi->calls[i] = digipeaters[i];
643 ax25_digi.repeated[i] = 0; 643 digi->repeated[i] = 0;
644 } 644 }
645 645
646 ax25_digi.ndigi = ndigis; 646 digi->ndigi = ndigis;
647 ax25_digi.lastrepeat = -1; 647 digi->lastrepeat = -1;
648 648
649 return &ax25_digi; 649 return digi;
650} 650}
651 651
652/* 652/*
@@ -656,6 +656,7 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
656{ 656{
657 struct nr_route_struct nr_route; 657 struct nr_route_struct nr_route;
658 struct net_device *dev; 658 struct net_device *dev;
659 ax25_digi digi;
659 int ret; 660 int ret;
660 661
661 switch (cmd) { 662 switch (cmd) {
@@ -673,13 +674,15 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
673 ret = nr_add_node(&nr_route.callsign, 674 ret = nr_add_node(&nr_route.callsign,
674 nr_route.mnemonic, 675 nr_route.mnemonic,
675 &nr_route.neighbour, 676 &nr_route.neighbour,
676 nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters), 677 nr_call_to_digi(&digi, nr_route.ndigis,
678 nr_route.digipeaters),
677 dev, nr_route.quality, 679 dev, nr_route.quality,
678 nr_route.obs_count); 680 nr_route.obs_count);
679 break; 681 break;
680 case NETROM_NEIGH: 682 case NETROM_NEIGH:
681 ret = nr_add_neigh(&nr_route.callsign, 683 ret = nr_add_neigh(&nr_route.callsign,
682 nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters), 684 nr_call_to_digi(&digi, nr_route.ndigis,
685 nr_route.digipeaters),
683 dev, nr_route.quality); 686 dev, nr_route.quality);
684 break; 687 break;
685 default: 688 default:
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 4667af51ed71..d183509d3fa6 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -183,7 +183,7 @@ static int gprs_close(struct net_device *dev)
183 return 0; 183 return 0;
184} 184}
185 185
186static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) 186static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
187{ 187{
188 struct gprs_dev *gp = netdev_priv(dev); 188 struct gprs_dev *gp = netdev_priv(dev);
189 struct sock *sk = gp->sk; 189 struct sock *sk = gp->sk;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index aa1617a7f265..7a4ee397d2f7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -413,6 +413,7 @@ found:
413} 413}
414EXPORT_SYMBOL(pn_sock_get_port); 414EXPORT_SYMBOL(pn_sock_get_port);
415 415
416#ifdef CONFIG_PROC_FS
416static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) 417static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
417{ 418{
418 struct net *net = seq_file_net(seq); 419 struct net *net = seq_file_net(seq);
@@ -498,7 +499,8 @@ static const struct seq_operations pn_sock_seq_ops = {
498 499
499static int pn_sock_open(struct inode *inode, struct file *file) 500static int pn_sock_open(struct inode *inode, struct file *file)
500{ 501{
501 return seq_open(file, &pn_sock_seq_ops); 502 return seq_open_net(inode, file, &pn_sock_seq_ops,
503 sizeof(struct seq_net_private));
502} 504}
503 505
504const struct file_operations pn_sock_seq_fops = { 506const struct file_operations pn_sock_seq_fops = {
@@ -506,5 +508,6 @@ const struct file_operations pn_sock_seq_fops = {
506 .open = pn_sock_open, 508 .open = pn_sock_open,
507 .read = seq_read, 509 .read = seq_read,
508 .llseek = seq_lseek, 510 .llseek = seq_lseek,
509 .release = seq_release, 511 .release = seq_release_net,
510}; 512};
513#endif
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 796773b5df9b..ec753b3ae72a 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,14 +1,28 @@
1 1
2config RDS 2config RDS
3 tristate "Reliable Datagram Sockets (RDS) (EXPERIMENTAL)" 3 tristate "The RDS Protocol (EXPERIMENTAL)"
4 depends on INET && INFINIBAND_IPOIB && EXPERIMENTAL 4 depends on INET && EXPERIMENTAL
5 depends on INFINIBAND && INFINIBAND_ADDR_TRANS
6 ---help--- 5 ---help---
7 RDS provides reliable, sequenced delivery of datagrams 6 The RDS (Reliable Datagram Sockets) protocol provides reliable,
8 over Infiniband. 7 sequenced delivery of datagrams over Infiniband, iWARP,
8 or TCP.
9
10config RDS_RDMA
11 tristate "RDS over Infiniband and iWARP"
12 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
13 ---help---
14 Allow RDS to use Infiniband and iWARP as a transport.
15 This transport supports RDMA operations.
16
17config RDS_TCP
18 tristate "RDS over TCP"
19 depends on RDS
20 ---help---
21 Allow RDS to use TCP as a transport.
22 This transport does not support RDMA operations.
9 23
10config RDS_DEBUG 24config RDS_DEBUG
11 bool "Debugging messages" 25 bool "RDS debugging messages"
12 depends on RDS 26 depends on RDS
13 default n 27 default n
14 28
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 51f27585fa08..b46eca109688 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -1,13 +1,20 @@
1obj-$(CONFIG_RDS) += rds.o 1obj-$(CONFIG_RDS) += rds.o
2rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \ 2rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
3 recv.o send.o stats.o sysctl.o threads.o transport.o \ 3 recv.o send.o stats.o sysctl.o threads.o transport.o \
4 loop.o page.o rdma.o \ 4 loop.o page.o rdma.o
5 rdma_transport.o \ 5
6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
7rds_rdma-objs := rdma_transport.o \
6 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ 8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
7 ib_sysctl.o ib_rdma.o \ 9 ib_sysctl.o ib_rdma.o \
8 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \ 10 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
9 iw_sysctl.o iw_rdma.o 11 iw_sysctl.o iw_rdma.o
10 12
13
14obj-$(CONFIG_RDS_TCP) += rds_tcp.o
15rds_tcp-objs := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
16 tcp_send.o tcp_stats.o
17
11ifeq ($(CONFIG_RDS_DEBUG), y) 18ifeq ($(CONFIG_RDS_DEBUG), y)
12EXTRA_CFLAGS += -DDEBUG 19EXTRA_CFLAGS += -DDEBUG
13endif 20endif
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index b11e7e527864..108ed2e671c5 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,6 @@
39 39
40#include "rds.h" 40#include "rds.h"
41#include "rdma.h" 41#include "rdma.h"
42#include "rdma_transport.h"
43 42
44/* this is just used for stats gathering :/ */ 43/* this is just used for stats gathering :/ */
45static DEFINE_SPINLOCK(rds_sock_lock); 44static DEFINE_SPINLOCK(rds_sock_lock);
@@ -509,7 +508,6 @@ out:
509 508
510static void __exit rds_exit(void) 509static void __exit rds_exit(void)
511{ 510{
512 rds_rdma_exit();
513 sock_unregister(rds_family_ops.family); 511 sock_unregister(rds_family_ops.family);
514 proto_unregister(&rds_proto); 512 proto_unregister(&rds_proto);
515 rds_conn_exit(); 513 rds_conn_exit();
@@ -549,14 +547,8 @@ static int __init rds_init(void)
549 rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info); 547 rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
550 rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info); 548 rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
551 549
552 /* ib/iwarp transports currently compiled-in */
553 ret = rds_rdma_init();
554 if (ret)
555 goto out_sock;
556 goto out; 550 goto out;
557 551
558out_sock:
559 sock_unregister(rds_family_ops.family);
560out_proto: 552out_proto:
561 proto_unregister(&rds_proto); 553 proto_unregister(&rds_proto);
562out_stats: 554out_stats:
diff --git a/net/rds/bind.c b/net/rds/bind.c
index c17cc39160ce..5d95fc007f1a 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -187,6 +187,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
187 if (trans == NULL) { 187 if (trans == NULL) {
188 ret = -EADDRNOTAVAIL; 188 ret = -EADDRNOTAVAIL;
189 rds_remove_bound(rs); 189 rds_remove_bound(rs);
190 if (printk_ratelimit())
191 printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
192 "load rds_tcp or rds_rdma?\n");
190 goto out; 193 goto out;
191 } 194 }
192 195
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 710e4599d76c..dd2711df640b 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -254,6 +254,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
254 read_unlock_irqrestore(&rds_cong_monitor_lock, flags); 254 read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
255 } 255 }
256} 256}
257EXPORT_SYMBOL_GPL(rds_cong_map_updated);
257 258
258int rds_cong_updated_since(unsigned long *recent) 259int rds_cong_updated_since(unsigned long *recent)
259{ 260{
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b420a20d84fd..cc8b568c0c84 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -255,12 +255,14 @@ struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
255{ 255{
256 return __rds_conn_create(laddr, faddr, trans, gfp, 0); 256 return __rds_conn_create(laddr, faddr, trans, gfp, 0);
257} 257}
258EXPORT_SYMBOL_GPL(rds_conn_create);
258 259
259struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, 260struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
260 struct rds_transport *trans, gfp_t gfp) 261 struct rds_transport *trans, gfp_t gfp)
261{ 262{
262 return __rds_conn_create(laddr, faddr, trans, gfp, 1); 263 return __rds_conn_create(laddr, faddr, trans, gfp, 1);
263} 264}
265EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
264 266
265void rds_conn_destroy(struct rds_connection *conn) 267void rds_conn_destroy(struct rds_connection *conn)
266{ 268{
@@ -303,6 +305,7 @@ void rds_conn_destroy(struct rds_connection *conn)
303 305
304 rds_conn_count--; 306 rds_conn_count--;
305} 307}
308EXPORT_SYMBOL_GPL(rds_conn_destroy);
306 309
307static void rds_conn_message_info(struct socket *sock, unsigned int len, 310static void rds_conn_message_info(struct socket *sock, unsigned int len,
308 struct rds_info_iterator *iter, 311 struct rds_info_iterator *iter,
@@ -406,6 +409,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
406 409
407 spin_unlock_irqrestore(&rds_conn_lock, flags); 410 spin_unlock_irqrestore(&rds_conn_lock, flags);
408} 411}
412EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
409 413
410static int rds_conn_info_visitor(struct rds_connection *conn, 414static int rds_conn_info_visitor(struct rds_connection *conn,
411 void *buffer) 415 void *buffer)
@@ -481,6 +485,7 @@ void rds_conn_drop(struct rds_connection *conn)
481 atomic_set(&conn->c_state, RDS_CONN_ERROR); 485 atomic_set(&conn->c_state, RDS_CONN_ERROR);
482 queue_work(rds_wq, &conn->c_down_w); 486 queue_work(rds_wq, &conn->c_down_w);
483} 487}
488EXPORT_SYMBOL_GPL(rds_conn_drop);
484 489
485/* 490/*
486 * An error occurred on the connection 491 * An error occurred on the connection
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 868559ac42d7..536ebe5d3f6b 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -282,6 +282,7 @@ struct rds_transport rds_ib_transport = {
282 .flush_mrs = rds_ib_flush_mrs, 282 .flush_mrs = rds_ib_flush_mrs,
283 .t_owner = THIS_MODULE, 283 .t_owner = THIS_MODULE,
284 .t_name = "infiniband", 284 .t_name = "infiniband",
285 .t_type = RDS_TRANS_IB
285}; 286};
286 287
287int __init rds_ib_init(void) 288int __init rds_ib_init(void)
diff --git a/net/rds/info.c b/net/rds/info.c
index 62aeef37aefe..814a91a6f4a7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -79,6 +79,7 @@ void rds_info_register_func(int optname, rds_info_func func)
79 rds_info_funcs[offset] = func; 79 rds_info_funcs[offset] = func;
80 spin_unlock(&rds_info_lock); 80 spin_unlock(&rds_info_lock);
81} 81}
82EXPORT_SYMBOL_GPL(rds_info_register_func);
82 83
83void rds_info_deregister_func(int optname, rds_info_func func) 84void rds_info_deregister_func(int optname, rds_info_func func)
84{ 85{
@@ -91,6 +92,7 @@ void rds_info_deregister_func(int optname, rds_info_func func)
91 rds_info_funcs[offset] = NULL; 92 rds_info_funcs[offset] = NULL;
92 spin_unlock(&rds_info_lock); 93 spin_unlock(&rds_info_lock);
93} 94}
95EXPORT_SYMBOL_GPL(rds_info_deregister_func);
94 96
95/* 97/*
96 * Typically we hold an atomic kmap across multiple rds_info_copy() calls 98 * Typically we hold an atomic kmap across multiple rds_info_copy() calls
@@ -137,6 +139,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
137 } 139 }
138 } 140 }
139} 141}
142EXPORT_SYMBOL_GPL(rds_info_copy);
140 143
141/* 144/*
142 * @optval points to the userspace buffer that the information snapshot 145 * @optval points to the userspace buffer that the information snapshot
diff --git a/net/rds/iw.c b/net/rds/iw.c
index f5e9a29a80a7..db224f7c2937 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -284,6 +284,7 @@ struct rds_transport rds_iw_transport = {
284 .flush_mrs = rds_iw_flush_mrs, 284 .flush_mrs = rds_iw_flush_mrs,
285 .t_owner = THIS_MODULE, 285 .t_owner = THIS_MODULE,
286 .t_name = "iwarp", 286 .t_name = "iwarp",
287 .t_type = RDS_TRANS_IWARP,
287 .t_prefer_loopback = 1, 288 .t_prefer_loopback = 1,
288}; 289};
289 290
diff --git a/net/rds/message.c b/net/rds/message.c
index 5a15dc8d0cd7..ca50a8ec9742 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -50,6 +50,7 @@ void rds_message_addref(struct rds_message *rm)
50 rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); 50 rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
51 atomic_inc(&rm->m_refcount); 51 atomic_inc(&rm->m_refcount);
52} 52}
53EXPORT_SYMBOL_GPL(rds_message_addref);
53 54
54/* 55/*
55 * This relies on dma_map_sg() not touching sg[].page during merging. 56 * This relies on dma_map_sg() not touching sg[].page during merging.
@@ -92,6 +93,7 @@ void rds_message_put(struct rds_message *rm)
92 kfree(rm); 93 kfree(rm);
93 } 94 }
94} 95}
96EXPORT_SYMBOL_GPL(rds_message_put);
95 97
96void rds_message_inc_free(struct rds_incoming *inc) 98void rds_message_inc_free(struct rds_incoming *inc)
97{ 99{
@@ -108,6 +110,7 @@ void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
108 hdr->h_sequence = cpu_to_be64(seq); 110 hdr->h_sequence = cpu_to_be64(seq);
109 hdr->h_exthdr[0] = RDS_EXTHDR_NONE; 111 hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
110} 112}
113EXPORT_SYMBOL_GPL(rds_message_populate_header);
111 114
112int rds_message_add_extension(struct rds_header *hdr, 115int rds_message_add_extension(struct rds_header *hdr,
113 unsigned int type, const void *data, unsigned int len) 116 unsigned int type, const void *data, unsigned int len)
@@ -133,6 +136,7 @@ int rds_message_add_extension(struct rds_header *hdr,
133 dst[len] = RDS_EXTHDR_NONE; 136 dst[len] = RDS_EXTHDR_NONE;
134 return 1; 137 return 1;
135} 138}
139EXPORT_SYMBOL_GPL(rds_message_add_extension);
136 140
137/* 141/*
138 * If a message has extension headers, retrieve them here. 142 * If a message has extension headers, retrieve them here.
@@ -208,6 +212,7 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
208 ext_hdr.h_rdma_offset = cpu_to_be32(offset); 212 ext_hdr.h_rdma_offset = cpu_to_be32(offset);
209 return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr)); 213 return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
210} 214}
215EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
211 216
212struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp) 217struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
213{ 218{
@@ -399,4 +404,5 @@ void rds_message_unmapped(struct rds_message *rm)
399 if (waitqueue_active(&rds_message_flush_waitq)) 404 if (waitqueue_active(&rds_message_flush_waitq))
400 wake_up(&rds_message_flush_waitq); 405 wake_up(&rds_message_flush_waitq);
401} 406}
407EXPORT_SYMBOL_GPL(rds_message_unmapped);
402 408
diff --git a/net/rds/page.c b/net/rds/page.c
index c460743a89ad..55c21efdb62e 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -81,6 +81,7 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
81 81
82 return 0; 82 return 0;
83} 83}
84EXPORT_SYMBOL_GPL(rds_page_copy_user);
84 85
85/* 86/*
86 * Message allocation uses this to build up regions of a message. 87 * Message allocation uses this to build up regions of a message.
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7a6c748cb56c..9ece910ea394 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -203,6 +203,7 @@ err_iw_init:
203out: 203out:
204 return ret; 204 return ret;
205} 205}
206module_init(rds_rdma_init);
206 207
207void rds_rdma_exit(void) 208void rds_rdma_exit(void)
208{ 209{
@@ -211,4 +212,9 @@ void rds_rdma_exit(void)
211 rds_ib_exit(); 212 rds_ib_exit();
212 rds_iw_exit(); 213 rds_iw_exit();
213} 214}
215module_exit(rds_rdma_exit);
216
217MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
218MODULE_DESCRIPTION("RDS: IB/iWARP transport");
219MODULE_LICENSE("Dual BSD/GPL");
214 220
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 290566c69d28..85d6f897ecc7 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -311,11 +311,17 @@ struct rds_notifier {
311 * flag and header. 311 * flag and header.
312 */ 312 */
313 313
314#define RDS_TRANS_IB 0
315#define RDS_TRANS_IWARP 1
316#define RDS_TRANS_TCP 2
317#define RDS_TRANS_COUNT 3
318
314struct rds_transport { 319struct rds_transport {
315 char t_name[TRANSNAMSIZ]; 320 char t_name[TRANSNAMSIZ];
316 struct list_head t_item; 321 struct list_head t_item;
317 struct module *t_owner; 322 struct module *t_owner;
318 unsigned int t_prefer_loopback:1; 323 unsigned int t_prefer_loopback:1;
324 unsigned int t_type;
319 325
320 int (*laddr_check)(__be32 addr); 326 int (*laddr_check)(__be32 addr);
321 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); 327 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 86bc1a06ebbd..fdff33c7b432 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -46,12 +46,14 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
46 inc->i_saddr = saddr; 46 inc->i_saddr = saddr;
47 inc->i_rdma_cookie = 0; 47 inc->i_rdma_cookie = 0;
48} 48}
49EXPORT_SYMBOL_GPL(rds_inc_init);
49 50
50void rds_inc_addref(struct rds_incoming *inc) 51void rds_inc_addref(struct rds_incoming *inc)
51{ 52{
52 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); 53 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
53 atomic_inc(&inc->i_refcount); 54 atomic_inc(&inc->i_refcount);
54} 55}
56EXPORT_SYMBOL_GPL(rds_inc_addref);
55 57
56void rds_inc_put(struct rds_incoming *inc) 58void rds_inc_put(struct rds_incoming *inc)
57{ 59{
@@ -62,6 +64,7 @@ void rds_inc_put(struct rds_incoming *inc)
62 inc->i_conn->c_trans->inc_free(inc); 64 inc->i_conn->c_trans->inc_free(inc);
63 } 65 }
64} 66}
67EXPORT_SYMBOL_GPL(rds_inc_put);
65 68
66static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, 69static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
67 struct rds_cong_map *map, 70 struct rds_cong_map *map,
@@ -237,6 +240,7 @@ out:
237 if (rs) 240 if (rs)
238 rds_sock_put(rs); 241 rds_sock_put(rs);
239} 242}
243EXPORT_SYMBOL_GPL(rds_recv_incoming);
240 244
241/* 245/*
242 * be very careful here. This is being called as the condition in 246 * be very careful here. This is being called as the condition in
diff --git a/net/rds/send.c b/net/rds/send.c
index a4a7f428cd76..28c88ff3d038 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -439,6 +439,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
439 sock_put(rds_rs_to_sk(rs)); 439 sock_put(rds_rs_to_sk(rs));
440 } 440 }
441} 441}
442EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
442 443
443/* 444/*
444 * This is the same as rds_rdma_send_complete except we 445 * This is the same as rds_rdma_send_complete except we
@@ -494,6 +495,7 @@ out:
494 495
495 return found; 496 return found;
496} 497}
498EXPORT_SYMBOL_GPL(rds_send_get_message);
497 499
498/* 500/*
499 * This removes messages from the socket's list if they're on it. The list 501 * This removes messages from the socket's list if they're on it. The list
@@ -610,6 +612,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
610 /* now remove the messages from the sock list as needed */ 612 /* now remove the messages from the sock list as needed */
611 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); 613 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
612} 614}
615EXPORT_SYMBOL_GPL(rds_send_drop_acked);
613 616
614void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) 617void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
615{ 618{
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 91d8c58b8335..7598eb07cfb1 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -37,6 +37,7 @@
37#include "rds.h" 37#include "rds.h"
38 38
39DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); 39DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
40EXPORT_PER_CPU_SYMBOL_GPL(rds_stats);
40 41
41/* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */ 42/* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */
42 43
@@ -90,6 +91,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,
90 rds_info_copy(iter, &ctr, sizeof(ctr)); 91 rds_info_copy(iter, &ctr, sizeof(ctr));
91 } 92 }
92} 93}
94EXPORT_SYMBOL_GPL(rds_stats_info_copy);
93 95
94/* 96/*
95 * This gives global counters across all the transports. The strings 97 * This gives global counters across all the transports. The strings
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
new file mode 100644
index 000000000000..b5198aee45d3
--- /dev/null
+++ b/net/rds/tcp.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/in.h>
35#include <net/tcp.h>
36
37#include "rds.h"
38#include "tcp.h"
39
40/* only for info exporting */
41static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
42static LIST_HEAD(rds_tcp_tc_list);
43unsigned int rds_tcp_tc_count;
44
45/* Track rds_tcp_connection structs so they can be cleaned up */
46static DEFINE_SPINLOCK(rds_tcp_conn_lock);
47static LIST_HEAD(rds_tcp_conn_list);
48
49static struct kmem_cache *rds_tcp_conn_slab;
50
51#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)
52
53/* doing it this way avoids calling tcp_sk() */
54void rds_tcp_nonagle(struct socket *sock)
55{
56 mm_segment_t oldfs = get_fs();
57 int val = 1;
58
59 set_fs(KERNEL_DS);
60 sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
61 sizeof(val));
62 set_fs(oldfs);
63}
64
65void rds_tcp_tune(struct socket *sock)
66{
67 struct sock *sk = sock->sk;
68
69 rds_tcp_nonagle(sock);
70
71 /*
72 * We're trying to saturate gigabit with the default,
73 * see svc_sock_setbufsize().
74 */
75 lock_sock(sk);
76 sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
77 sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
78 sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
79 release_sock(sk);
80}
81
82u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
83{
84 return tcp_sk(tc->t_sock->sk)->snd_nxt;
85}
86
87u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
88{
89 return tcp_sk(tc->t_sock->sk)->snd_una;
90}
91
92void rds_tcp_restore_callbacks(struct socket *sock,
93 struct rds_tcp_connection *tc)
94{
95 rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
96 write_lock_bh(&sock->sk->sk_callback_lock);
97
98 /* done under the callback_lock to serialize with write_space */
99 spin_lock(&rds_tcp_tc_list_lock);
100 list_del_init(&tc->t_list_item);
101 rds_tcp_tc_count--;
102 spin_unlock(&rds_tcp_tc_list_lock);
103
104 tc->t_sock = NULL;
105
106 sock->sk->sk_write_space = tc->t_orig_write_space;
107 sock->sk->sk_data_ready = tc->t_orig_data_ready;
108 sock->sk->sk_state_change = tc->t_orig_state_change;
109 sock->sk->sk_user_data = NULL;
110
111 write_unlock_bh(&sock->sk->sk_callback_lock);
112}
113
114/*
115 * This is the only path that sets tc->t_sock. Send and receive trust that
116 * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
117 * called while it isn't set.
118 */
119void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
120{
121 struct rds_tcp_connection *tc = conn->c_transport_data;
122
123 rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
124 write_lock_bh(&sock->sk->sk_callback_lock);
125
126 /* done under the callback_lock to serialize with write_space */
127 spin_lock(&rds_tcp_tc_list_lock);
128 list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
129 rds_tcp_tc_count++;
130 spin_unlock(&rds_tcp_tc_list_lock);
131
132 /* accepted sockets need our listen data ready undone */
133 if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
134 sock->sk->sk_data_ready = sock->sk->sk_user_data;
135
136 tc->t_sock = sock;
137 tc->conn = conn;
138 tc->t_orig_data_ready = sock->sk->sk_data_ready;
139 tc->t_orig_write_space = sock->sk->sk_write_space;
140 tc->t_orig_state_change = sock->sk->sk_state_change;
141
142 sock->sk->sk_user_data = conn;
143 sock->sk->sk_data_ready = rds_tcp_data_ready;
144 sock->sk->sk_write_space = rds_tcp_write_space;
145 sock->sk->sk_state_change = rds_tcp_state_change;
146
147 write_unlock_bh(&sock->sk->sk_callback_lock);
148}
149
150static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
151 struct rds_info_iterator *iter,
152 struct rds_info_lengths *lens)
153{
154 struct rds_info_tcp_socket tsinfo;
155 struct rds_tcp_connection *tc;
156 unsigned long flags;
157 struct sockaddr_in sin;
158 int sinlen;
159
160 spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
161
162 if (len / sizeof(tsinfo) < rds_tcp_tc_count)
163 goto out;
164
165 list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
166
167 sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
168 tsinfo.local_addr = sin.sin_addr.s_addr;
169 tsinfo.local_port = sin.sin_port;
170 sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
171 tsinfo.peer_addr = sin.sin_addr.s_addr;
172 tsinfo.peer_port = sin.sin_port;
173
174 tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
175 tsinfo.data_rem = tc->t_tinc_data_rem;
176 tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
177 tsinfo.last_expected_una = tc->t_last_expected_una;
178 tsinfo.last_seen_una = tc->t_last_seen_una;
179
180 rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
181 }
182
183out:
184 lens->nr = rds_tcp_tc_count;
185 lens->each = sizeof(tsinfo);
186
187 spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
188}
189
190static int rds_tcp_laddr_check(__be32 addr)
191{
192 if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
193 return 0;
194 return -EADDRNOTAVAIL;
195}
196
197static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
198{
199 struct rds_tcp_connection *tc;
200
201 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
202 if (tc == NULL)
203 return -ENOMEM;
204
205 tc->t_sock = NULL;
206 tc->t_tinc = NULL;
207 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
208 tc->t_tinc_data_rem = 0;
209
210 conn->c_transport_data = tc;
211
212 spin_lock_irq(&rds_tcp_conn_lock);
213 list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
214 spin_unlock_irq(&rds_tcp_conn_lock);
215
216 rdsdebug("alloced tc %p\n", conn->c_transport_data);
217 return 0;
218}
219
220static void rds_tcp_conn_free(void *arg)
221{
222 struct rds_tcp_connection *tc = arg;
223 rdsdebug("freeing tc %p\n", tc);
224 kmem_cache_free(rds_tcp_conn_slab, tc);
225}
226
227static void rds_tcp_destroy_conns(void)
228{
229 struct rds_tcp_connection *tc, *_tc;
230 LIST_HEAD(tmp_list);
231
232 /* avoid calling conn_destroy with irqs off */
233 spin_lock_irq(&rds_tcp_conn_lock);
234 list_splice(&rds_tcp_conn_list, &tmp_list);
235 INIT_LIST_HEAD(&rds_tcp_conn_list);
236 spin_unlock_irq(&rds_tcp_conn_lock);
237
238 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
239 if (tc->conn->c_passive)
240 rds_conn_destroy(tc->conn->c_passive);
241 rds_conn_destroy(tc->conn);
242 }
243}
244
245void rds_tcp_exit(void)
246{
247 rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
248 rds_tcp_listen_stop();
249 rds_tcp_destroy_conns();
250 rds_trans_unregister(&rds_tcp_transport);
251 rds_tcp_recv_exit();
252 kmem_cache_destroy(rds_tcp_conn_slab);
253}
254module_exit(rds_tcp_exit);
255
256struct rds_transport rds_tcp_transport = {
257 .laddr_check = rds_tcp_laddr_check,
258 .xmit_prepare = rds_tcp_xmit_prepare,
259 .xmit_complete = rds_tcp_xmit_complete,
260 .xmit_cong_map = rds_tcp_xmit_cong_map,
261 .xmit = rds_tcp_xmit,
262 .recv = rds_tcp_recv,
263 .conn_alloc = rds_tcp_conn_alloc,
264 .conn_free = rds_tcp_conn_free,
265 .conn_connect = rds_tcp_conn_connect,
266 .conn_shutdown = rds_tcp_conn_shutdown,
267 .inc_copy_to_user = rds_tcp_inc_copy_to_user,
268 .inc_purge = rds_tcp_inc_purge,
269 .inc_free = rds_tcp_inc_free,
270 .stats_info_copy = rds_tcp_stats_info_copy,
271 .exit = rds_tcp_exit,
272 .t_owner = THIS_MODULE,
273 .t_name = "tcp",
274 .t_type = RDS_TRANS_TCP,
275 .t_prefer_loopback = 1,
276};
277
278int __init rds_tcp_init(void)
279{
280 int ret;
281
282 rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
283 sizeof(struct rds_tcp_connection),
284 0, 0, NULL);
285 if (rds_tcp_conn_slab == NULL) {
286 ret = -ENOMEM;
287 goto out;
288 }
289
290 ret = rds_tcp_recv_init();
291 if (ret)
292 goto out_slab;
293
294 ret = rds_trans_register(&rds_tcp_transport);
295 if (ret)
296 goto out_recv;
297
298 ret = rds_tcp_listen_init();
299 if (ret)
300 goto out_register;
301
302 rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
303
304 goto out;
305
306out_register:
307 rds_trans_unregister(&rds_tcp_transport);
308out_recv:
309 rds_tcp_recv_exit();
310out_slab:
311 kmem_cache_destroy(rds_tcp_conn_slab);
312out:
313 return ret;
314}
315module_init(rds_tcp_init);
316
317MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
318MODULE_DESCRIPTION("RDS: TCP transport");
319MODULE_LICENSE("Dual BSD/GPL");
320
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
new file mode 100644
index 000000000000..844fa6b9cf5a
--- /dev/null
+++ b/net/rds/tcp.h
@@ -0,0 +1,93 @@
#ifndef _RDS_TCP_H
#define _RDS_TCP_H

/* Well-known port the RDS/TCP transport listens on and connects to. */
#define RDS_TCP_PORT 16385

/* An incoming RDS message being reassembled from cloned TCP skbs. */
struct rds_tcp_incoming {
	struct rds_incoming ti_inc;
	struct sk_buff_head ti_skb_list;	/* payload fragments, in order */
};

/* Per-connection TCP transport state, hung off rds_connection. */
struct rds_tcp_connection {

	struct list_head t_tcp_node;
	struct rds_connection *conn;
	struct socket *t_sock;
	/* TCP's original socket callbacks, restored at shutdown */
	void *t_orig_write_space;
	void *t_orig_data_ready;
	void *t_orig_state_change;

	/* message currently being reassembled from the stream, plus how
	 * many header and data bytes of it are still outstanding */
	struct rds_tcp_incoming *t_tinc;
	size_t t_tinc_hdr_rem;
	size_t t_tinc_data_rem;

	/* XXX error report? */
	struct work_struct t_conn_w;
	struct work_struct t_send_w;
	struct work_struct t_down_w;
	struct work_struct t_recv_w;

	/* for info exporting only */
	struct list_head t_list_item;
	u32 t_last_sent_nxt;
	u32 t_last_expected_una;
	u32 t_last_seen_una;
};

/* Counters exported via rds_tcp_stats_info_copy(); member order must
 * match rds_tcp_stat_names[] in tcp_stats.c. */
struct rds_tcp_statistics {
	uint64_t s_tcp_data_ready_calls;
	uint64_t s_tcp_write_space_calls;
	uint64_t s_tcp_sndbuf_full;
	uint64_t s_tcp_connect_raced;
	uint64_t s_tcp_listen_closed_stale;
};

/* tcp.c */
int __init rds_tcp_init(void);
void rds_tcp_exit(void);
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc);
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;

/* tcp_connect.c */
int rds_tcp_conn_connect(struct rds_connection *conn);
void rds_tcp_conn_shutdown(struct rds_connection *conn);
void rds_tcp_state_change(struct sock *sk);

/* tcp_listen.c */
int __init rds_tcp_listen_init(void);
void rds_tcp_listen_stop(void);
void rds_tcp_listen_data_ready(struct sock *sk, int bytes);

/* tcp_recv.c */
int __init rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk, int bytes);
int rds_tcp_recv(struct rds_connection *conn);
void rds_tcp_inc_purge(struct rds_incoming *inc);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			     size_t size);

/* tcp_send.c */
void rds_tcp_xmit_prepare(struct rds_connection *conn);
void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
		 unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
int rds_tcp_xmit_cong_map(struct rds_connection *conn,
			  struct rds_cong_map *map, unsigned long offset);

/* tcp_stats.c */
DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
#define rds_tcp_stats_inc(member) rds_stats_inc_which(rds_tcp_stats, member)
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
				     unsigned int avail);

#endif
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
new file mode 100644
index 000000000000..211522f9a9a2
--- /dev/null
+++ b/net/rds/tcp_connect.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/in.h>
35#include <net/tcp.h>
36
37#include "rds.h"
38#include "tcp.h"
39
/*
 * Replacement sk_state_change callback installed on RDS/TCP sockets.
 * Translates TCP state transitions into RDS connection events, then
 * chains to the callback TCP originally installed.
 */
void rds_tcp_state_change(struct sock *sk)
{
	void (*state_change)(struct sock *sk);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn == NULL) {
		/* teardown raced us; fall back to TCP's own callback */
		state_change = sk->sk_state_change;
		goto out;
	}
	tc = conn->c_transport_data;
	state_change = tc->t_orig_state_change;

	rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);

	switch(sk->sk_state) {
	/* ignore connecting sockets as they make progress */
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	case TCP_ESTABLISHED:
		rds_connect_complete(conn);
		break;
	case TCP_CLOSE:
		rds_conn_drop(conn);
		/* fall through */
	default:
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
	/* always invoke the underlying callback, outside our lock */
	state_change(sk);
}
74
/*
 * Kick off a non-blocking TCP connect for @conn.  Once the callbacks
 * are installed they own the socket; completion (or failure) is
 * reported asynchronously through rds_tcp_state_change().  Returns 0
 * when the connect succeeded or is in progress, else a -errno.
 */
int rds_tcp_conn_connect(struct rds_connection *conn)
{
	struct socket *sock = NULL;
	struct sockaddr_in src, dest;
	int ret;

	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(sock);

	/* bind to the connection's local address; any source port */
	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
	if (ret) {
		rdsdebug("bind failed with %d at address %u.%u.%u.%u\n",
			 ret, NIPQUAD(conn->c_laddr));
		goto out;
	}

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_TCP_PORT);

	/*
	 * once we call connect() we can start getting callbacks and they
	 * own the socket
	 */
	rds_tcp_set_callbacks(sock, conn);
	ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
				 O_NONBLOCK);
	/* callbacks own the socket now; don't release it below */
	sock = NULL;

	rdsdebug("connect to address %u.%u.%u.%u returned %d\n",
		 NIPQUAD(conn->c_faddr), ret);
	/* a non-blocking connect still in flight counts as success */
	if (ret == -EINPROGRESS)
		ret = 0;

out:
	if (sock)
		sock_release(sock);
	return ret;
}
121
122/*
123 * Before killing the tcp socket this needs to serialize with callbacks. The
124 * caller has already grabbed the sending sem so we're serialized with other
125 * senders.
126 *
127 * TCP calls the callbacks with the sock lock so we hold it while we reset the
128 * callbacks to those set by TCP. Our callbacks won't execute again once we
129 * hold the sock lock.
130 */
131void rds_tcp_conn_shutdown(struct rds_connection *conn)
132{
133 struct rds_tcp_connection *tc = conn->c_transport_data;
134 struct socket *sock = tc->t_sock;
135
136 rdsdebug("shutting down conn %p tc %p sock %p\n", conn, tc, sock);
137
138 if (sock) {
139 sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
140 lock_sock(sock->sk);
141 rds_tcp_restore_callbacks(sock, tc); /* tc->tc_sock = NULL */
142
143 release_sock(sock->sk);
144 sock_release(sock);
145 };
146
147 if (tc->t_tinc) {
148 rds_inc_put(&tc->t_tinc->ti_inc);
149 tc->t_tinc = NULL;
150 }
151 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
152 tc->t_tinc_data_rem = 0;
153}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
new file mode 100644
index 000000000000..24b743eb0b1b
--- /dev/null
+++ b/net/rds/tcp_listen.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/in.h>
35#include <net/tcp.h>
36
37#include "rds.h"
38#include "tcp.h"
39
/*
 * cheesy, but simple..
 *
 * A single global listening socket; data_ready on it queues one work
 * item which drains every pending connection via accept.
 */
static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;
46
/*
 * Accept a single pending connection off the listening socket and
 * attach it to the RDS connection for its address pair.  Returns 0 when
 * a connection was handled (the caller keeps looping) and < 0 when
 * accept fails -- typically -EAGAIN, meaning nothing is pending.
 */
static int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;

	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
			       sock->sk->sk_protocol, &new_sock);
	if (ret)
		goto out;

	new_sock->type = sock->type;
	new_sock->ops = sock->ops;
	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

	rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n",
		 NIPQUAD(inet->saddr), ntohs(inet->sport),
		 NIPQUAD(inet->daddr), ntohs(inet->dport));

	conn = rds_conn_create(inet->saddr, inet->daddr, &rds_tcp_transport,
			       GFP_KERNEL);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}

	/*
	 * see the comment above rds_queue_delayed_reconnect()
	 */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		/* lost a race with an outgoing connect or a stale conn */
		if (rds_conn_state(conn) == RDS_CONN_UP)
			rds_tcp_stats_inc(s_tcp_listen_closed_stale);
		else
			rds_tcp_stats_inc(s_tcp_connect_raced);
		rds_conn_drop(conn);
		ret = 0;
		goto out;
	}

	rds_tcp_set_callbacks(new_sock, conn);
	rds_connect_complete(conn);
	new_sock = NULL;	/* now owned by the connection */
	ret = 0;

out:
	if (new_sock)
		sock_release(new_sock);
	return ret;
}
103
104static void rds_tcp_accept_worker(struct work_struct *work)
105{
106 while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
107 cond_resched();
108}
109
/*
 * Replacement sk_data_ready for the listening socket: queue the accept
 * worker, then chain to the callback TCP originally installed (which
 * was stashed in sk_user_data by rds_tcp_listen_init()).
 */
void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (ready == NULL) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready.. we only want to queue listen work for our listening
	 * socket
	 */
	if (sk->sk_state == TCP_LISTEN)
		queue_work(rds_wq, &rds_tcp_listen_work);

out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk, bytes);
}
136
137int __init rds_tcp_listen_init(void)
138{
139 struct sockaddr_in sin;
140 struct socket *sock = NULL;
141 int ret;
142
143 ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
144 if (ret < 0)
145 goto out;
146
147 sock->sk->sk_reuse = 1;
148 rds_tcp_nonagle(sock);
149
150 write_lock_bh(&sock->sk->sk_callback_lock);
151 sock->sk->sk_user_data = sock->sk->sk_data_ready;
152 sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
153 write_unlock_bh(&sock->sk->sk_callback_lock);
154
155 sin.sin_family = PF_INET,
156 sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
157 sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
158
159 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
160 if (ret < 0)
161 goto out;
162
163 ret = sock->ops->listen(sock, 64);
164 if (ret < 0)
165 goto out;
166
167 rds_tcp_listen_sock = sock;
168 sock = NULL;
169out:
170 if (sock)
171 sock_release(sock);
172 return ret;
173}
174
/*
 * Tear down the listening socket: restore TCP's original data_ready,
 * wait out any queued accept work, then release the socket.
 */
void rds_tcp_listen_stop(void)
{
	struct socket *sock = rds_tcp_listen_sock;
	struct sock *sk;

	if (sock == NULL)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	sock_release(sock);
	rds_tcp_listen_sock = NULL;
}
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
new file mode 100644
index 000000000000..c00dafffbb5a
--- /dev/null
+++ b/net/rds/tcp_recv.c
@@ -0,0 +1,356 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <net/tcp.h>
35
36#include "rds.h"
37#include "tcp.h"
38
39static struct kmem_cache *rds_tcp_incoming_slab;
40
41void rds_tcp_inc_purge(struct rds_incoming *inc)
42{
43 struct rds_tcp_incoming *tinc;
44 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
45 rdsdebug("purging tinc %p inc %p\n", tinc, inc);
46 skb_queue_purge(&tinc->ti_skb_list);
47}
48
49void rds_tcp_inc_free(struct rds_incoming *inc)
50{
51 struct rds_tcp_incoming *tinc;
52 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
53 rds_tcp_inc_purge(inc);
54 rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
55 kmem_cache_free(rds_tcp_incoming_slab, tinc);
56}
57
/*
 * this is pretty lame, but, whatever.
 *
 * Copy up to @size bytes of payload from the message's queued skb
 * fragments into the caller's iovec.  Returns the number of bytes
 * copied, or -EFAULT on a failed user copy.
 *
 * NOTE(review): iov is advanced past zero-length entries without an
 * explicit element-count bound -- assumes the caller's iovec covers at
 * least @size bytes; confirm against the recvmsg caller.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			     size_t size)
{
	struct rds_tcp_incoming *tinc;
	struct iovec *iov, tmp;
	struct sk_buff *skb;
	unsigned long to_copy, skb_off;
	int ret = 0;

	if (size == 0)
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	iov = first_iov;
	tmp = *iov;	/* work on a copy; the copy helper consumes it */

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			while (tmp.iov_len == 0) {
				iov++;
				tmp = *iov;
			}

			to_copy = min(tmp.iov_len, size);
			to_copy = min(to_copy, skb->len - skb_off);

			rdsdebug("ret %d size %zu skb %p skb_off %lu "
				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
				 ret, size, skb, skb_off, skb->len,
				 tmp.iov_base, tmp.iov_len, to_copy);

			/* modifies tmp as it copies */
			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
						    to_copy)) {
				ret = -EFAULT;
				goto out;
			}

			size -= to_copy;
			ret += to_copy;
			skb_off += to_copy;
			if (size == 0)
				goto out;
		}
	}
out:
	return ret;
}
110
/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap. They must add up to the exact size of the congestion bitmap. We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection. We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;	/* byte offset within the current map page */
	unsigned int map_page;	/* index of the current map page */
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	/* walk the fragments in order, filling the map pages sequentially */
	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	/* any bit may have changed; wake everyone waiting on the map */
	rds_cong_map_updated(map, ~(u64) 0);
}
166
/* Read context threaded through tcp_read_sock() to rds_tcp_data_recv(). */
struct rds_tcp_desc_arg {
	struct rds_connection *conn;
	gfp_t gfp;		/* allocation flags for this read pass */
	enum km_type km;	/* kmap type handed through to the recv path */
};
172
/*
 * tcp_read_sock() actor: carve RDS messages out of the TCP byte
 * stream.  Header bytes are copied into the pending rds_tcp_incoming;
 * payload bytes are captured by cloning and trimming the skb, so no
 * payload copy happens here.  Returns how many of @len bytes were
 * consumed; partial progress makes tcp_read_sock() stop.
 */
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_connection *conn = arg->conn;
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (tinc == NULL) {
			/* start a new message; its header arrives first */
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (tinc == NULL) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
			/*
			 * XXX * we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

		if (left && tc->t_tinc_hdr_rem) {
			/* fill in the remainder of the message header */
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
					sizeof(struct rds_header) -
					tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
			}
		}

		if (left && tc->t_tinc_data_rem) {
			clone = skb_clone(skb, arg->gfp);
			if (clone == NULL) {
				desc->error = -ENOMEM;
				goto out;
			}

			/* trim the clone to just this message's bytes */
			to_copy = min(tc->t_tinc_data_rem, left);
			pskb_pull(clone, offset);
			pskb_trim(clone, to_copy);
			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			/* message complete: deliver it and reset state */
			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, conn->c_faddr,
						  conn->c_laddr, &tinc->ti_inc,
						  arg->gfp, arg->km);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}
271
272/* the caller has to hold the sock lock */
273int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, enum km_type km)
274{
275 struct rds_tcp_connection *tc = conn->c_transport_data;
276 struct socket *sock = tc->t_sock;
277 read_descriptor_t desc;
278 struct rds_tcp_desc_arg arg;
279
280 /* It's like glib in the kernel! */
281 arg.conn = conn;
282 arg.gfp = gfp;
283 arg.km = km;
284 desc.arg.data = &arg;
285 desc.error = 0;
286 desc.count = 1; /* give more than one skb per call */
287
288 tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
289 rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
290 desc.error);
291
292 return desc.error;
293}
294
/*
 * Process-context receive path, invoked from the recv worker.
 *
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * if we fail to allocate we're in trouble.. blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
	release_sock(sock->sk);

	return ret;
}
316
/*
 * Replacement sk_data_ready for established RDS/TCP sockets: pull data
 * off the socket inline (atomic context), deferring to the recv worker
 * if atomic allocation fails, then chain to TCP's original callback.
 */
void rds_tcp_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn == NULL) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = conn->c_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	/* on -ENOMEM punt to the worker, which can block with GFP_KERNEL */
	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk, bytes);
}
342
343int __init rds_tcp_recv_init(void)
344{
345 rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
346 sizeof(struct rds_tcp_incoming),
347 0, 0, NULL);
348 if (rds_tcp_incoming_slab == NULL)
349 return -ENOMEM;
350 return 0;
351}
352
/* Tear down the receive path; all incoming messages must be freed. */
void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
new file mode 100644
index 000000000000..ab545e0cd5d6
--- /dev/null
+++ b/net/rds/tcp_send.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/in.h>
35#include <net/tcp.h>
36
37#include "rds.h"
38#include "tcp.h"
39
/*
 * Toggle TCP_CORK on @sock.  setsockopt() expects a user-space pointer,
 * so temporarily widen the address limit to pass a kernel buffer.
 */
static void rds_tcp_cork(struct socket *sock, int val)
{
	mm_segment_t oldfs;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
			      sizeof(val));
	set_fs(oldfs);
}
50
51void rds_tcp_xmit_prepare(struct rds_connection *conn)
52{
53 struct rds_tcp_connection *tc = conn->c_transport_data;
54
55 rds_tcp_cork(tc->t_sock, 1);
56}
57
58void rds_tcp_xmit_complete(struct rds_connection *conn)
59{
60 struct rds_tcp_connection *tc = conn->c_transport_data;
61
62 rds_tcp_cork(tc->t_sock, 0);
63}
64
65/* the core send_sem serializes this with other xmit and shutdown */
66int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
67{
68 struct kvec vec = {
69 .iov_base = data,
70 .iov_len = len,
71 };
72 struct msghdr msg = {
73 .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
74 };
75
76 return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
77}
78
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit_cong_map(struct rds_connection *conn,
			  struct rds_cong_map *map, unsigned long offset)
{
	static struct rds_header rds_tcp_map_header = {
		.h_flags = RDS_FLAG_CONG_BITMAP,
	};
	struct rds_tcp_connection *tc = conn->c_transport_data;
	unsigned long i;
	int ret;
	int copied = 0;

	/* Some problem claims cpu_to_be32(constant) isn't a constant. */
	rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);

	/* @offset counts from the start of the header: finish sending the
	 * header first, then stream the map pages via sendpage */
	if (offset < sizeof(struct rds_header)) {
		ret = rds_tcp_sendmsg(tc->t_sock,
				      (void *)&rds_tcp_map_header + offset,
				      sizeof(struct rds_header) - offset);
		if (ret <= 0)
			return ret;
		offset += ret;
		copied = ret;
		/* partial header send; the caller will resume from offset */
		if (offset < sizeof(struct rds_header))
			return ret;
	}

	offset -= sizeof(struct rds_header);
	i = offset / PAGE_SIZE;
	offset = offset % PAGE_SIZE;
	BUG_ON(i >= RDS_CONG_MAP_PAGES);

	do {
		ret = tc->t_sock->ops->sendpage(tc->t_sock,
					virt_to_page(map->m_page_addrs[i]),
					offset, PAGE_SIZE - offset,
					MSG_DONTWAIT);
		if (ret <= 0)
			break;
		copied += ret;
		offset += ret;
		if (offset == PAGE_SIZE) {
			offset = 0;
			i++;
		}
	} while (i < RDS_CONG_MAP_PAGES);

	/* report progress if any was made, otherwise the last error */
	return copied ? copied : ret;
}
128
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
		 unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	int done = 0;
	int ret = 0;

	if (hdr_off == 0) {
		/*
		 * m_ack_seq is set to the sequence number of the last byte of
		 * header and data. see rds_tcp_is_acked().
		 */
		tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
		rm->m_ack_seq = tc->t_last_sent_nxt +
				sizeof(struct rds_header) +
				be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
		/* NOTE(review): the *_clear_bit barrier flavor is used before
		 * a set_bit here, presumably to order the m_ack_seq store
		 * before the flag -- confirm against the atomic_ops docs. */
		smp_mb__before_clear_bit();
		set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
		tc->t_last_expected_una = rm->m_ack_seq + 1;

		rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
			 rm, rds_tcp_snd_nxt(tc),
			 (unsigned long long)rm->m_ack_seq);
	}

	if (hdr_off < sizeof(struct rds_header)) {
		/* see rds_tcp_write_space() */
		set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);

		ret = rds_tcp_sendmsg(tc->t_sock,
				      (void *)&rm->m_inc.i_hdr + hdr_off,
				      sizeof(rm->m_inc.i_hdr) - hdr_off);
		if (ret < 0)
			goto out;
		done += ret;
		/* partial header write; the caller will retry from here */
		if (hdr_off + done != sizeof(struct rds_header))
			goto out;
	}

	/* stream each payload page via sendpage, resuming at (sg, off) */
	while (sg < rm->m_nents) {
		ret = tc->t_sock->ops->sendpage(tc->t_sock,
						sg_page(&rm->m_sg[sg]),
						rm->m_sg[sg].offset + off,
						rm->m_sg[sg].length - off,
						MSG_DONTWAIT|MSG_NOSIGNAL);
		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
			 rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
			 ret);
		if (ret <= 0)
			break;

		off += ret;
		done += ret;
		if (off == rm->m_sg[sg].length) {
			off = 0;
			sg++;
		}
	}

out:
	if (ret <= 0) {
		/* write_space will hit after EAGAIN, all else fatal */
		if (ret == -EAGAIN) {
			rds_tcp_stats_inc(s_tcp_sndbuf_full);
			ret = 0;
		} else {
			printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u "
			       "returned %d, disconnecting and reconnecting\n",
			       NIPQUAD(conn->c_faddr), ret);
			rds_conn_drop(conn);
		}
	}
	/* report bytes sent if any, otherwise the error (or 0) */
	if (done == 0)
		done = ret;
	return done;
}
206
207/*
208 * rm->m_ack_seq is set to the tcp sequence number that corresponds to the
209 * last byte of the message, including the header. This means that the
210 * entire message has been received if rm->m_ack_seq is "before" the next
211 * unacked byte of the TCP sequence space. We have to do very careful
212 * wrapping 32bit comparisons here.
213 */
214static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
215{
216 if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
217 return 0;
218 return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
219}
220
/*
 * Replacement sk_write_space callback: use TCP's advancing una to find
 * fully-acked RDS messages, drop them from the send queue, and kick the
 * send worker, then chain to TCP's original callback.
 */
void rds_tcp_write_space(struct sock *sk)
{
	void (*write_space)(struct sock *sk);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn == NULL) {
		/* teardown raced us; fall back to TCP's own callback */
		write_space = sk->sk_write_space;
		goto out;
	}

	tc = conn->c_transport_data;
	rdsdebug("write_space for tc %p\n", tc);
	write_space = tc->t_orig_write_space;
	rds_tcp_stats_inc(s_tcp_write_space_calls);

	rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
	tc->t_last_seen_una = rds_tcp_snd_una(tc);
	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
	read_unlock(&sk->sk_callback_lock);

	/*
	 * write_space is only called when data leaves tcp's send queue if
	 * SOCK_NOSPACE is set. We set SOCK_NOSPACE every time we put
	 * data in tcp's send queue because we use write_space to parse the
	 * sequence numbers and notice that rds messages have been fully
	 * received.
	 *
	 * tcp's write_space clears SOCK_NOSPACE if the send queue has more
	 * than a certain amount of space. So we need to set it again *after*
	 * we call tcp's write_space or else we might only get called on the
	 * first of a series of incoming tcp acks.
	 */
	write_space(sk);

	if (sk->sk_socket)
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
diff --git a/net/rds/tcp_stats.c b/net/rds/tcp_stats.c
new file mode 100644
index 000000000000..d5898d03cd68
--- /dev/null
+++ b/net/rds/tcp_stats.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/percpu.h>
34#include <linux/seq_file.h>
35#include <linux/proc_fs.h>
36
37#include "rds.h"
38#include "tcp.h"
39
40DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats)
41 ____cacheline_aligned;
42
/*
 * Names exported alongside the counters by rds_tcp_stats_info_copy().
 * NOTE(review): order must track the uint64_t counters in
 * struct rds_tcp_statistics -- confirm against tcp.h when changing either.
 *
 * Fix: the original read "const char const *", a duplicated 'const' on the
 * pointee (GCC: "duplicate 'const' declaration specifier"). The intent is an
 * immutable table of pointers to const strings: "const char *const".
 */
43static const char *const rds_tcp_stat_names[] = {
44 "tcp_data_ready_calls",
45 "tcp_write_space_calls",
46 "tcp_sndbuf_full",
47 "tcp_connect_raced",
48 "tcp_listen_closed_stale",
49};
50
/*
 * Sum the per-cpu TCP transport counters into one snapshot and copy it to
 * the rds-info iterator.
 *
 * Returns the number of counters (ARRAY_SIZE(rds_tcp_stat_names)) whether or
 * not anything was copied; when @avail is too small the copy is skipped and
 * the caller learns the required size from the return value.
 *
 * NOTE(review): the summing loop treats struct rds_tcp_statistics as a bare
 * array of uint64_t -- assumes the struct holds only u64 counters with no
 * padding; confirm against its definition in tcp.h.
 */
51unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
52 unsigned int avail)
53{
54 struct rds_tcp_statistics stats = {0, };
55 uint64_t *src;
56 uint64_t *sum;
57 size_t i;
58 int cpu;
59
	/* Not enough room for all counters: report required size only. */
60 if (avail < ARRAY_SIZE(rds_tcp_stat_names))
61 goto out;
62
	/* Accumulate each online cpu's counters field-by-field as u64s. */
63 for_each_online_cpu(cpu) {
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
65 sum = (uint64_t *)&stats;
66 for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
67 *(sum++) += *(src++);
68 }
69
70 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
71 ARRAY_SIZE(rds_tcp_stat_names));
72out:
73 return ARRAY_SIZE(rds_tcp_stat_names);
74}
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 828a1bf9ea92..dd7e0cad1e7c 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -68,6 +68,7 @@
68 * (TCP, IB/RDMA) to provide the necessary synchronisation. 68 * (TCP, IB/RDMA) to provide the necessary synchronisation.
69 */ 69 */
70struct workqueue_struct *rds_wq; 70struct workqueue_struct *rds_wq;
71EXPORT_SYMBOL_GPL(rds_wq);
71 72
72void rds_connect_complete(struct rds_connection *conn) 73void rds_connect_complete(struct rds_connection *conn)
73{ 74{
@@ -89,6 +90,7 @@ void rds_connect_complete(struct rds_connection *conn)
89 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 90 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
90 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 91 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
91} 92}
93EXPORT_SYMBOL_GPL(rds_connect_complete);
92 94
93/* 95/*
94 * This random exponential backoff is relied on to eventually resolve racing 96 * This random exponential backoff is relied on to eventually resolve racing
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 767da61ad2f3..7e1067901353 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -37,7 +37,7 @@
37#include "rds.h" 37#include "rds.h"
38#include "loop.h" 38#include "loop.h"
39 39
40static LIST_HEAD(rds_transports); 40static struct rds_transport *transports[RDS_TRANS_COUNT];
41static DECLARE_RWSEM(rds_trans_sem); 41static DECLARE_RWSEM(rds_trans_sem);
42 42
43int rds_trans_register(struct rds_transport *trans) 43int rds_trans_register(struct rds_transport *trans)
@@ -46,36 +46,44 @@ int rds_trans_register(struct rds_transport *trans)
46 46
47 down_write(&rds_trans_sem); 47 down_write(&rds_trans_sem);
48 48
49 list_add_tail(&trans->t_item, &rds_transports); 49 if (transports[trans->t_type])
50 printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name); 50 printk(KERN_ERR "RDS Transport type %d already registered\n",
51 trans->t_type);
52 else {
53 transports[trans->t_type] = trans;
54 printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);
55 }
51 56
52 up_write(&rds_trans_sem); 57 up_write(&rds_trans_sem);
53 58
54 return 0; 59 return 0;
55} 60}
61EXPORT_SYMBOL_GPL(rds_trans_register);
56 62
57void rds_trans_unregister(struct rds_transport *trans) 63void rds_trans_unregister(struct rds_transport *trans)
58{ 64{
59 down_write(&rds_trans_sem); 65 down_write(&rds_trans_sem);
60 66
61 list_del_init(&trans->t_item); 67 transports[trans->t_type] = NULL;
62 printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name); 68 printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name);
63 69
64 up_write(&rds_trans_sem); 70 up_write(&rds_trans_sem);
65} 71}
72EXPORT_SYMBOL_GPL(rds_trans_unregister);
66 73
67struct rds_transport *rds_trans_get_preferred(__be32 addr) 74struct rds_transport *rds_trans_get_preferred(__be32 addr)
68{ 75{
69 struct rds_transport *trans;
70 struct rds_transport *ret = NULL; 76 struct rds_transport *ret = NULL;
77 int i;
71 78
72 if (IN_LOOPBACK(ntohl(addr))) 79 if (IN_LOOPBACK(ntohl(addr)))
73 return &rds_loop_transport; 80 return &rds_loop_transport;
74 81
75 down_read(&rds_trans_sem); 82 down_read(&rds_trans_sem);
76 list_for_each_entry(trans, &rds_transports, t_item) { 83 for (i = 0; i < RDS_TRANS_COUNT; i++)
77 if (trans->laddr_check(addr) == 0) { 84 {
78 ret = trans; 85 if (transports[i] && (transports[i]->laddr_check(addr) == 0)) {
86 ret = transports[i];
79 break; 87 break;
80 } 88 }
81 } 89 }
@@ -97,12 +105,15 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
97 struct rds_transport *trans; 105 struct rds_transport *trans;
98 unsigned int total = 0; 106 unsigned int total = 0;
99 unsigned int part; 107 unsigned int part;
108 int i;
100 109
101 rds_info_iter_unmap(iter); 110 rds_info_iter_unmap(iter);
102 down_read(&rds_trans_sem); 111 down_read(&rds_trans_sem);
103 112
104 list_for_each_entry(trans, &rds_transports, t_item) { 113 for (i = 0; i < RDS_TRANS_COUNT; i++)
105 if (trans->stats_info_copy == NULL) 114 {
115 trans = transports[i];
116 if (!trans || !trans->stats_info_copy)
106 continue; 117 continue;
107 118
108 part = trans->stats_info_copy(iter, avail); 119 part = trans->stats_info_copy(iter, avail);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f0a76f6bca71..e5f478ca3d61 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -954,6 +954,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
954 struct rose_sock *rose = rose_sk(sk); 954 struct rose_sock *rose = rose_sk(sk);
955 int n; 955 int n;
956 956
957 memset(srose, 0, sizeof(*srose));
957 if (peer != 0) { 958 if (peer != 0) {
958 if (sk->sk_state != TCP_ESTABLISHED) 959 if (sk->sk_state != TCP_ESTABLISHED)
959 return -ENOTCONN; 960 return -ENOTCONN;
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index c711e2edf5ce..424b893d1450 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -131,7 +131,7 @@ static int rose_close(struct net_device *dev)
131 return 0; 131 return 0;
132} 132}
133 133
134static int rose_xmit(struct sk_buff *skb, struct net_device *dev) 134static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
135{ 135{
136 struct net_device_stats *stats = &dev->stats; 136 struct net_device_stats *stats = &dev->stats;
137 137
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 54d950cd4b8d..f14e71bfa58f 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the Linux Traffic Control Unit. 2# Makefile for the Linux Traffic Control Unit.
3# 3#
4 4
5obj-y := sch_generic.o 5obj-y := sch_generic.o sch_mq.o
6 6
7obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o 7obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
8obj-$(CONFIG_NET_CLS) += cls_api.o 8obj-$(CONFIG_NET_CLS) += cls_api.o
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 09cdcdfe7e91..6a536949cdc0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -168,8 +168,7 @@ replay:
168 168
169 /* Find qdisc */ 169 /* Find qdisc */
170 if (!parent) { 170 if (!parent) {
171 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); 171 q = dev->qdisc;
172 q = dev_queue->qdisc_sleeping;
173 parent = q->handle; 172 parent = q->handle;
174 } else { 173 } else {
175 q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); 174 q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -181,6 +180,9 @@ replay:
181 if ((cops = q->ops->cl_ops) == NULL) 180 if ((cops = q->ops->cl_ops) == NULL)
182 return -EINVAL; 181 return -EINVAL;
183 182
183 if (cops->tcf_chain == NULL)
184 return -EOPNOTSUPP;
185
184 /* Do we search for filter, attached to class? */ 186 /* Do we search for filter, attached to class? */
185 if (TC_H_MIN(parent)) { 187 if (TC_H_MIN(parent)) {
186 cl = cops->get(q, parent); 188 cl = cops->get(q, parent);
@@ -405,7 +407,6 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
405static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 407static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
406{ 408{
407 struct net *net = sock_net(skb->sk); 409 struct net *net = sock_net(skb->sk);
408 struct netdev_queue *dev_queue;
409 int t; 410 int t;
410 int s_t; 411 int s_t;
411 struct net_device *dev; 412 struct net_device *dev;
@@ -424,15 +425,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
424 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 425 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
425 return skb->len; 426 return skb->len;
426 427
427 dev_queue = netdev_get_tx_queue(dev, 0);
428 if (!tcm->tcm_parent) 428 if (!tcm->tcm_parent)
429 q = dev_queue->qdisc_sleeping; 429 q = dev->qdisc;
430 else 430 else
431 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 431 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
432 if (!q) 432 if (!q)
433 goto out; 433 goto out;
434 if ((cops = q->ops->cl_ops) == NULL) 434 if ((cops = q->ops->cl_ops) == NULL)
435 goto errout; 435 goto errout;
436 if (cops->tcf_chain == NULL)
437 goto errout;
436 if (TC_H_MIN(tcm->tcm_parent)) { 438 if (TC_H_MIN(tcm->tcm_parent)) {
437 cl = cops->get(q, tcm->tcm_parent); 439 cl = cops->get(q, tcm->tcm_parent);
438 if (cl == 0) 440 if (cl == 0)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 24d17ce9c294..3af106140f35 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -207,7 +207,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
207static void qdisc_list_add(struct Qdisc *q) 207static void qdisc_list_add(struct Qdisc *q)
208{ 208{
209 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) 209 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
210 list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); 210 list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
211} 211}
212 212
213void qdisc_list_del(struct Qdisc *q) 213void qdisc_list_del(struct Qdisc *q)
@@ -219,17 +219,11 @@ EXPORT_SYMBOL(qdisc_list_del);
219 219
220struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) 220struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
221{ 221{
222 unsigned int i;
223 struct Qdisc *q; 222 struct Qdisc *q;
224 223
225 for (i = 0; i < dev->num_tx_queues; i++) { 224 q = qdisc_match_from_root(dev->qdisc, handle);
226 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 225 if (q)
227 struct Qdisc *txq_root = txq->qdisc_sleeping; 226 goto out;
228
229 q = qdisc_match_from_root(txq_root, handle);
230 if (q)
231 goto out;
232 }
233 227
234 q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); 228 q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
235out: 229out:
@@ -616,32 +610,6 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
616 return i>0 ? autohandle : 0; 610 return i>0 ? autohandle : 0;
617} 611}
618 612
619/* Attach toplevel qdisc to device queue. */
620
621static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
622 struct Qdisc *qdisc)
623{
624 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
625 spinlock_t *root_lock;
626
627 root_lock = qdisc_lock(oqdisc);
628 spin_lock_bh(root_lock);
629
630 /* Prune old scheduler */
631 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
632 qdisc_reset(oqdisc);
633
634 /* ... and graft new one */
635 if (qdisc == NULL)
636 qdisc = &noop_qdisc;
637 dev_queue->qdisc_sleeping = qdisc;
638 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
639
640 spin_unlock_bh(root_lock);
641
642 return oqdisc;
643}
644
645void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) 613void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
646{ 614{
647 const struct Qdisc_class_ops *cops; 615 const struct Qdisc_class_ops *cops;
@@ -710,6 +678,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
710 if (dev->flags & IFF_UP) 678 if (dev->flags & IFF_UP)
711 dev_deactivate(dev); 679 dev_deactivate(dev);
712 680
681 if (new && new->ops->attach) {
682 new->ops->attach(new);
683 num_q = 0;
684 }
685
713 for (i = 0; i < num_q; i++) { 686 for (i = 0; i < num_q; i++) {
714 struct netdev_queue *dev_queue = &dev->rx_queue; 687 struct netdev_queue *dev_queue = &dev->rx_queue;
715 688
@@ -720,22 +693,27 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
720 if (new && i > 0) 693 if (new && i > 0)
721 atomic_inc(&new->refcnt); 694 atomic_inc(&new->refcnt);
722 695
723 notify_and_destroy(skb, n, classid, old, new); 696 qdisc_destroy(old);
724 } 697 }
725 698
699 notify_and_destroy(skb, n, classid, dev->qdisc, new);
700 if (new && !new->ops->attach)
701 atomic_inc(&new->refcnt);
702 dev->qdisc = new ? : &noop_qdisc;
703
726 if (dev->flags & IFF_UP) 704 if (dev->flags & IFF_UP)
727 dev_activate(dev); 705 dev_activate(dev);
728 } else { 706 } else {
729 const struct Qdisc_class_ops *cops = parent->ops->cl_ops; 707 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
730 708
731 err = -EINVAL; 709 err = -EOPNOTSUPP;
732 710 if (cops && cops->graft) {
733 if (cops) {
734 unsigned long cl = cops->get(parent, classid); 711 unsigned long cl = cops->get(parent, classid);
735 if (cl) { 712 if (cl) {
736 err = cops->graft(parent, cl, new, &old); 713 err = cops->graft(parent, cl, new, &old);
737 cops->put(parent, cl); 714 cops->put(parent, cl);
738 } 715 } else
716 err = -ENOENT;
739 } 717 }
740 if (!err) 718 if (!err)
741 notify_and_destroy(skb, n, classid, old, new); 719 notify_and_destroy(skb, n, classid, old, new);
@@ -755,7 +733,8 @@ static struct lock_class_key qdisc_rx_lock;
755 733
756static struct Qdisc * 734static struct Qdisc *
757qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, 735qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
758 u32 parent, u32 handle, struct nlattr **tca, int *errp) 736 struct Qdisc *p, u32 parent, u32 handle,
737 struct nlattr **tca, int *errp)
759{ 738{
760 int err; 739 int err;
761 struct nlattr *kind = tca[TCA_KIND]; 740 struct nlattr *kind = tca[TCA_KIND];
@@ -832,24 +811,21 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
832 if (tca[TCA_RATE]) { 811 if (tca[TCA_RATE]) {
833 spinlock_t *root_lock; 812 spinlock_t *root_lock;
834 813
814 err = -EOPNOTSUPP;
815 if (sch->flags & TCQ_F_MQROOT)
816 goto err_out4;
817
835 if ((sch->parent != TC_H_ROOT) && 818 if ((sch->parent != TC_H_ROOT) &&
836 !(sch->flags & TCQ_F_INGRESS)) 819 !(sch->flags & TCQ_F_INGRESS) &&
820 (!p || !(p->flags & TCQ_F_MQROOT)))
837 root_lock = qdisc_root_sleeping_lock(sch); 821 root_lock = qdisc_root_sleeping_lock(sch);
838 else 822 else
839 root_lock = qdisc_lock(sch); 823 root_lock = qdisc_lock(sch);
840 824
841 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 825 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
842 root_lock, tca[TCA_RATE]); 826 root_lock, tca[TCA_RATE]);
843 if (err) { 827 if (err)
844 /* 828 goto err_out4;
845 * Any broken qdiscs that would require
846 * a ops->reset() here? The qdisc was never
847 * in action so it shouldn't be necessary.
848 */
849 if (ops->destroy)
850 ops->destroy(sch);
851 goto err_out3;
852 }
853 } 829 }
854 830
855 qdisc_list_add(sch); 831 qdisc_list_add(sch);
@@ -865,6 +841,15 @@ err_out2:
865err_out: 841err_out:
866 *errp = err; 842 *errp = err;
867 return NULL; 843 return NULL;
844
845err_out4:
846 /*
847 * Any broken qdiscs that would require a ops->reset() here?
848 * The qdisc was never in action so it shouldn't be necessary.
849 */
850 if (ops->destroy)
851 ops->destroy(sch);
852 goto err_out3;
868} 853}
869 854
870static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) 855static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
@@ -889,13 +874,16 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
889 qdisc_put_stab(sch->stab); 874 qdisc_put_stab(sch->stab);
890 sch->stab = stab; 875 sch->stab = stab;
891 876
892 if (tca[TCA_RATE]) 877 if (tca[TCA_RATE]) {
893 /* NB: ignores errors from replace_estimator 878 /* NB: ignores errors from replace_estimator
894 because change can't be undone. */ 879 because change can't be undone. */
880 if (sch->flags & TCQ_F_MQROOT)
881 goto out;
895 gen_replace_estimator(&sch->bstats, &sch->rate_est, 882 gen_replace_estimator(&sch->bstats, &sch->rate_est,
896 qdisc_root_sleeping_lock(sch), 883 qdisc_root_sleeping_lock(sch),
897 tca[TCA_RATE]); 884 tca[TCA_RATE]);
898 885 }
886out:
899 return 0; 887 return 0;
900} 888}
901 889
@@ -974,9 +962,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
974 q = dev->rx_queue.qdisc_sleeping; 962 q = dev->rx_queue.qdisc_sleeping;
975 } 963 }
976 } else { 964 } else {
977 struct netdev_queue *dev_queue; 965 q = dev->qdisc;
978 dev_queue = netdev_get_tx_queue(dev, 0);
979 q = dev_queue->qdisc_sleeping;
980 } 966 }
981 if (!q) 967 if (!q)
982 return -ENOENT; 968 return -ENOENT;
@@ -1044,9 +1030,7 @@ replay:
1044 q = dev->rx_queue.qdisc_sleeping; 1030 q = dev->rx_queue.qdisc_sleeping;
1045 } 1031 }
1046 } else { 1032 } else {
1047 struct netdev_queue *dev_queue; 1033 q = dev->qdisc;
1048 dev_queue = netdev_get_tx_queue(dev, 0);
1049 q = dev_queue->qdisc_sleeping;
1050 } 1034 }
1051 1035
1052 /* It may be default qdisc, ignore it */ 1036 /* It may be default qdisc, ignore it */
@@ -1123,13 +1107,19 @@ create_n_graft:
1123 if (!(n->nlmsg_flags&NLM_F_CREATE)) 1107 if (!(n->nlmsg_flags&NLM_F_CREATE))
1124 return -ENOENT; 1108 return -ENOENT;
1125 if (clid == TC_H_INGRESS) 1109 if (clid == TC_H_INGRESS)
1126 q = qdisc_create(dev, &dev->rx_queue, 1110 q = qdisc_create(dev, &dev->rx_queue, p,
1127 tcm->tcm_parent, tcm->tcm_parent, 1111 tcm->tcm_parent, tcm->tcm_parent,
1128 tca, &err); 1112 tca, &err);
1129 else 1113 else {
1130 q = qdisc_create(dev, netdev_get_tx_queue(dev, 0), 1114 unsigned int ntx = 0;
1115
1116 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1117 ntx = p->ops->cl_ops->select_queue(p, tcm);
1118
1119 q = qdisc_create(dev, netdev_get_tx_queue(dev, ntx), p,
1131 tcm->tcm_parent, tcm->tcm_handle, 1120 tcm->tcm_parent, tcm->tcm_handle,
1132 tca, &err); 1121 tca, &err);
1122 }
1133 if (q == NULL) { 1123 if (q == NULL) {
1134 if (err == -EAGAIN) 1124 if (err == -EAGAIN)
1135 goto replay; 1125 goto replay;
@@ -1291,8 +1281,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1291 s_q_idx = 0; 1281 s_q_idx = 0;
1292 q_idx = 0; 1282 q_idx = 0;
1293 1283
1294 dev_queue = netdev_get_tx_queue(dev, 0); 1284 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1295 if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
1296 goto done; 1285 goto done;
1297 1286
1298 dev_queue = &dev->rx_queue; 1287 dev_queue = &dev->rx_queue;
@@ -1323,7 +1312,6 @@ done:
1323static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1312static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1324{ 1313{
1325 struct net *net = sock_net(skb->sk); 1314 struct net *net = sock_net(skb->sk);
1326 struct netdev_queue *dev_queue;
1327 struct tcmsg *tcm = NLMSG_DATA(n); 1315 struct tcmsg *tcm = NLMSG_DATA(n);
1328 struct nlattr *tca[TCA_MAX + 1]; 1316 struct nlattr *tca[TCA_MAX + 1];
1329 struct net_device *dev; 1317 struct net_device *dev;
@@ -1361,7 +1349,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1361 1349
1362 /* Step 1. Determine qdisc handle X:0 */ 1350 /* Step 1. Determine qdisc handle X:0 */
1363 1351
1364 dev_queue = netdev_get_tx_queue(dev, 0);
1365 if (pid != TC_H_ROOT) { 1352 if (pid != TC_H_ROOT) {
1366 u32 qid1 = TC_H_MAJ(pid); 1353 u32 qid1 = TC_H_MAJ(pid);
1367 1354
@@ -1372,7 +1359,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1372 } else if (qid1) { 1359 } else if (qid1) {
1373 qid = qid1; 1360 qid = qid1;
1374 } else if (qid == 0) 1361 } else if (qid == 0)
1375 qid = dev_queue->qdisc_sleeping->handle; 1362 qid = dev->qdisc->handle;
1376 1363
1377 /* Now qid is genuine qdisc handle consistent 1364 /* Now qid is genuine qdisc handle consistent
1378 both with parent and child. 1365 both with parent and child.
@@ -1383,7 +1370,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1383 pid = TC_H_MAKE(qid, pid); 1370 pid = TC_H_MAKE(qid, pid);
1384 } else { 1371 } else {
1385 if (qid == 0) 1372 if (qid == 0)
1386 qid = dev_queue->qdisc_sleeping->handle; 1373 qid = dev->qdisc->handle;
1387 } 1374 }
1388 1375
1389 /* OK. Locate qdisc */ 1376 /* OK. Locate qdisc */
@@ -1417,7 +1404,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1417 goto out; 1404 goto out;
1418 break; 1405 break;
1419 case RTM_DELTCLASS: 1406 case RTM_DELTCLASS:
1420 err = cops->delete(q, cl); 1407 err = -EOPNOTSUPP;
1408 if (cops->delete)
1409 err = cops->delete(q, cl);
1421 if (err == 0) 1410 if (err == 0)
1422 tclass_notify(skb, n, q, cl, RTM_DELTCLASS); 1411 tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
1423 goto out; 1412 goto out;
@@ -1431,7 +1420,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1431 } 1420 }
1432 1421
1433 new_cl = cl; 1422 new_cl = cl;
1434 err = cops->change(q, clid, pid, tca, &new_cl); 1423 err = -EOPNOTSUPP;
1424 if (cops->change)
1425 err = cops->change(q, clid, pid, tca, &new_cl);
1435 if (err == 0) 1426 if (err == 0)
1436 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); 1427 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
1437 1428
@@ -1584,8 +1575,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1584 s_t = cb->args[0]; 1575 s_t = cb->args[0];
1585 t = 0; 1576 t = 0;
1586 1577
1587 dev_queue = netdev_get_tx_queue(dev, 0); 1578 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1588 if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
1589 goto done; 1579 goto done;
1590 1580
1591 dev_queue = &dev->rx_queue; 1581 dev_queue = &dev->rx_queue;
@@ -1705,6 +1695,7 @@ static int __init pktsched_init(void)
1705{ 1695{
1706 register_qdisc(&pfifo_qdisc_ops); 1696 register_qdisc(&pfifo_qdisc_ops);
1707 register_qdisc(&bfifo_qdisc_ops); 1697 register_qdisc(&bfifo_qdisc_ops);
1698 register_qdisc(&mq_qdisc_ops);
1708 proc_net_fops_create(&init_net, "psched", 0, &psched_fops); 1699 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1709 1700
1710 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); 1701 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2a8b83af7c47..ab82f145f689 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -49,7 +49,7 @@ struct atm_flow_data {
49 struct socket *sock; /* for closing */ 49 struct socket *sock; /* for closing */
50 u32 classid; /* x:y type ID */ 50 u32 classid; /* x:y type ID */
51 int ref; /* reference count */ 51 int ref; /* reference count */
52 struct gnet_stats_basic bstats; 52 struct gnet_stats_basic_packed bstats;
53 struct gnet_stats_queue qstats; 53 struct gnet_stats_queue qstats;
54 struct atm_flow_data *next; 54 struct atm_flow_data *next;
55 struct atm_flow_data *excess; /* flow for excess traffic; 55 struct atm_flow_data *excess; /* flow for excess traffic;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 23a167670fd5..5b132c473264 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -128,7 +128,7 @@ struct cbq_class
128 long avgidle; 128 long avgidle;
129 long deficit; /* Saved deficit for WRR */ 129 long deficit; /* Saved deficit for WRR */
130 psched_time_t penalized; 130 psched_time_t penalized;
131 struct gnet_stats_basic bstats; 131 struct gnet_stats_basic_packed bstats;
132 struct gnet_stats_queue qstats; 132 struct gnet_stats_queue qstats;
133 struct gnet_stats_rate_est rate_est; 133 struct gnet_stats_rate_est rate_est;
134 struct tc_cbq_xstats xstats; 134 struct tc_cbq_xstats xstats;
@@ -1621,29 +1621,25 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1621{ 1621{
1622 struct cbq_class *cl = (struct cbq_class*)arg; 1622 struct cbq_class *cl = (struct cbq_class*)arg;
1623 1623
1624 if (cl) { 1624 if (new == NULL) {
1625 if (new == NULL) { 1625 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1626 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, 1626 &pfifo_qdisc_ops, cl->common.classid);
1627 &pfifo_qdisc_ops, 1627 if (new == NULL)
1628 cl->common.classid); 1628 return -ENOBUFS;
1629 if (new == NULL) 1629 } else {
1630 return -ENOBUFS;
1631 } else {
1632#ifdef CONFIG_NET_CLS_ACT 1630#ifdef CONFIG_NET_CLS_ACT
1633 if (cl->police == TC_POLICE_RECLASSIFY) 1631 if (cl->police == TC_POLICE_RECLASSIFY)
1634 new->reshape_fail = cbq_reshape_fail; 1632 new->reshape_fail = cbq_reshape_fail;
1635#endif 1633#endif
1636 }
1637 sch_tree_lock(sch);
1638 *old = cl->q;
1639 cl->q = new;
1640 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1641 qdisc_reset(*old);
1642 sch_tree_unlock(sch);
1643
1644 return 0;
1645 } 1634 }
1646 return -ENOENT; 1635 sch_tree_lock(sch);
1636 *old = cl->q;
1637 cl->q = new;
1638 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1639 qdisc_reset(*old);
1640 sch_tree_unlock(sch);
1641
1642 return 0;
1647} 1643}
1648 1644
1649static struct Qdisc * 1645static struct Qdisc *
@@ -1651,7 +1647,7 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg)
1651{ 1647{
1652 struct cbq_class *cl = (struct cbq_class*)arg; 1648 struct cbq_class *cl = (struct cbq_class*)arg;
1653 1649
1654 return cl ? cl->q : NULL; 1650 return cl->q;
1655} 1651}
1656 1652
1657static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) 1653static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7597fe146866..12b2fb04b29b 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -22,7 +22,7 @@ struct drr_class {
22 unsigned int refcnt; 22 unsigned int refcnt;
23 unsigned int filter_cnt; 23 unsigned int filter_cnt;
24 24
25 struct gnet_stats_basic bstats; 25 struct gnet_stats_basic_packed bstats;
26 struct gnet_stats_queue qstats; 26 struct gnet_stats_queue qstats;
27 struct gnet_stats_rate_est rate_est; 27 struct gnet_stats_rate_est rate_est;
28 struct list_head alist; 28 struct list_head alist;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 693df7ae33d8..4ae6aa562f2b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -406,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
406 406
407#define PFIFO_FAST_BANDS 3 407#define PFIFO_FAST_BANDS 3
408 408
409static inline struct sk_buff_head *prio2list(struct sk_buff *skb, 409/*
410 struct Qdisc *qdisc) 410 * Private data for a pfifo_fast scheduler containing:
411 * - queues for the three band
412 * - bitmap indicating which of the bands contain skbs
413 */
414struct pfifo_fast_priv {
415 u32 bitmap;
416 struct sk_buff_head q[PFIFO_FAST_BANDS];
417};
418
419/*
420 * Convert a bitmap to the first band number where an skb is queued, where:
421 * bitmap=0 means there are no skbs on any band.
422 * bitmap=1 means there is an skb on band 0.
423 * bitmap=7 means there are skbs on all 3 bands, etc.
424 */
425static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
426
427static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
428 int band)
411{ 429{
412 struct sk_buff_head *list = qdisc_priv(qdisc); 430 return priv->q + band;
413 return list + prio2band[skb->priority & TC_PRIO_MAX];
414} 431}
415 432
416static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) 433static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
417{ 434{
418 struct sk_buff_head *list = prio2list(skb, qdisc); 435 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
436 int band = prio2band[skb->priority & TC_PRIO_MAX];
437 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
438 struct sk_buff_head *list = band2list(priv, band);
419 439
420 if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) { 440 priv->bitmap |= (1 << band);
421 qdisc->q.qlen++; 441 qdisc->q.qlen++;
422 return __qdisc_enqueue_tail(skb, qdisc, list); 442 return __qdisc_enqueue_tail(skb, qdisc, list);
423 } 443 }
@@ -427,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
427 447
428static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) 448static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
429{ 449{
430 int prio; 450 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
431 struct sk_buff_head *list = qdisc_priv(qdisc); 451 int band = bitmap2band[priv->bitmap];
432 452
433 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 453 if (likely(band >= 0)) {
434 if (!skb_queue_empty(list + prio)) { 454 struct sk_buff_head *list = band2list(priv, band);
435 qdisc->q.qlen--; 455 struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
436 return __qdisc_dequeue_head(qdisc, list + prio); 456
437 } 457 qdisc->q.qlen--;
458 if (skb_queue_empty(list))
459 priv->bitmap &= ~(1 << band);
460
461 return skb;
438 } 462 }
439 463
440 return NULL; 464 return NULL;
@@ -442,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
442 466
443static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) 467static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
444{ 468{
445 int prio; 469 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
446 struct sk_buff_head *list = qdisc_priv(qdisc); 470 int band = bitmap2band[priv->bitmap];
471
472 if (band >= 0) {
473 struct sk_buff_head *list = band2list(priv, band);
447 474
448 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 475 return skb_peek(list);
449 if (!skb_queue_empty(list + prio))
450 return skb_peek(list + prio);
451 } 476 }
452 477
453 return NULL; 478 return NULL;
@@ -456,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
456static void pfifo_fast_reset(struct Qdisc* qdisc) 481static void pfifo_fast_reset(struct Qdisc* qdisc)
457{ 482{
458 int prio; 483 int prio;
459 struct sk_buff_head *list = qdisc_priv(qdisc); 484 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
460 485
461 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) 486 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
462 __qdisc_reset_queue(qdisc, list + prio); 487 __qdisc_reset_queue(qdisc, band2list(priv, prio));
463 488
489 priv->bitmap = 0;
464 qdisc->qstats.backlog = 0; 490 qdisc->qstats.backlog = 0;
465 qdisc->q.qlen = 0; 491 qdisc->q.qlen = 0;
466} 492}
@@ -480,17 +506,17 @@ nla_put_failure:
480static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) 506static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
481{ 507{
482 int prio; 508 int prio;
483 struct sk_buff_head *list = qdisc_priv(qdisc); 509 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
484 510
485 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) 511 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
486 skb_queue_head_init(list + prio); 512 skb_queue_head_init(band2list(priv, prio));
487 513
488 return 0; 514 return 0;
489} 515}
490 516
491static struct Qdisc_ops pfifo_fast_ops __read_mostly = { 517struct Qdisc_ops pfifo_fast_ops __read_mostly = {
492 .id = "pfifo_fast", 518 .id = "pfifo_fast",
493 .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), 519 .priv_size = sizeof(struct pfifo_fast_priv),
494 .enqueue = pfifo_fast_enqueue, 520 .enqueue = pfifo_fast_enqueue,
495 .dequeue = pfifo_fast_dequeue, 521 .dequeue = pfifo_fast_dequeue,
496 .peek = pfifo_fast_peek, 522 .peek = pfifo_fast_peek,
@@ -597,17 +623,29 @@ void qdisc_destroy(struct Qdisc *qdisc)
597} 623}
598EXPORT_SYMBOL(qdisc_destroy); 624EXPORT_SYMBOL(qdisc_destroy);
599 625
600static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) 626/* Attach toplevel qdisc to device queue. */
627struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
628 struct Qdisc *qdisc)
601{ 629{
602 unsigned int i; 630 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
631 spinlock_t *root_lock;
603 632
604 for (i = 0; i < dev->num_tx_queues; i++) { 633 root_lock = qdisc_lock(oqdisc);
605 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 634 spin_lock_bh(root_lock);
606 635
607 if (txq->qdisc_sleeping != &noop_qdisc) 636 /* Prune old scheduler */
608 return false; 637 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
609 } 638 qdisc_reset(oqdisc);
610 return true; 639
640 /* ... and graft new one */
641 if (qdisc == NULL)
642 qdisc = &noop_qdisc;
643 dev_queue->qdisc_sleeping = qdisc;
644 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
645
646 spin_unlock_bh(root_lock);
647
648 return oqdisc;
611} 649}
612 650
613static void attach_one_default_qdisc(struct net_device *dev, 651static void attach_one_default_qdisc(struct net_device *dev,
@@ -632,6 +670,26 @@ static void attach_one_default_qdisc(struct net_device *dev,
632 dev_queue->qdisc_sleeping = qdisc; 670 dev_queue->qdisc_sleeping = qdisc;
633} 671}
634 672
673static void attach_default_qdiscs(struct net_device *dev)
674{
675 struct netdev_queue *txq;
676 struct Qdisc *qdisc;
677
678 txq = netdev_get_tx_queue(dev, 0);
679
680 if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
681 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
682 dev->qdisc = txq->qdisc_sleeping;
683 atomic_inc(&dev->qdisc->refcnt);
684 } else {
685 qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
686 if (qdisc) {
687 qdisc->ops->attach(qdisc);
688 dev->qdisc = qdisc;
689 }
690 }
691}
692
635static void transition_one_qdisc(struct net_device *dev, 693static void transition_one_qdisc(struct net_device *dev,
636 struct netdev_queue *dev_queue, 694 struct netdev_queue *dev_queue,
637 void *_need_watchdog) 695 void *_need_watchdog)
@@ -659,8 +717,8 @@ void dev_activate(struct net_device *dev)
659 virtual interfaces 717 virtual interfaces
660 */ 718 */
661 719
662 if (dev_all_qdisc_sleeping_noop(dev)) 720 if (dev->qdisc == &noop_qdisc)
663 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 721 attach_default_qdiscs(dev);
664 722
665 if (!netif_carrier_ok(dev)) 723 if (!netif_carrier_ok(dev))
666 /* Delay activation until next carrier-on event */ 724 /* Delay activation until next carrier-on event */
@@ -751,6 +809,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
751 809
752void dev_init_scheduler(struct net_device *dev) 810void dev_init_scheduler(struct net_device *dev)
753{ 811{
812 dev->qdisc = &noop_qdisc;
754 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); 813 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
755 dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); 814 dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
756 815
@@ -776,5 +835,8 @@ void dev_shutdown(struct net_device *dev)
776{ 835{
777 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); 836 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
778 shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); 837 shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
838 qdisc_destroy(dev->qdisc);
839 dev->qdisc = &noop_qdisc;
840
779 WARN_ON(timer_pending(&dev->watchdog_timer)); 841 WARN_ON(timer_pending(&dev->watchdog_timer));
780} 842}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 362c2811b2df..375d64cb1a3d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,7 +116,7 @@ struct hfsc_class
116 struct Qdisc_class_common cl_common; 116 struct Qdisc_class_common cl_common;
117 unsigned int refcnt; /* usage count */ 117 unsigned int refcnt; /* usage count */
118 118
119 struct gnet_stats_basic bstats; 119 struct gnet_stats_basic_packed bstats;
120 struct gnet_stats_queue qstats; 120 struct gnet_stats_queue qstats;
121 struct gnet_stats_rate_est rate_est; 121 struct gnet_stats_rate_est rate_est;
122 unsigned int level; /* class level in hierarchy */ 122 unsigned int level; /* class level in hierarchy */
@@ -1203,8 +1203,6 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1203{ 1203{
1204 struct hfsc_class *cl = (struct hfsc_class *)arg; 1204 struct hfsc_class *cl = (struct hfsc_class *)arg;
1205 1205
1206 if (cl == NULL)
1207 return -ENOENT;
1208 if (cl->level > 0) 1206 if (cl->level > 0)
1209 return -EINVAL; 1207 return -EINVAL;
1210 if (new == NULL) { 1208 if (new == NULL) {
@@ -1228,7 +1226,7 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
1228{ 1226{
1229 struct hfsc_class *cl = (struct hfsc_class *)arg; 1227 struct hfsc_class *cl = (struct hfsc_class *)arg;
1230 1228
1231 if (cl != NULL && cl->level == 0) 1229 if (cl->level == 0)
1232 return cl->qdisc; 1230 return cl->qdisc;
1233 1231
1234 return NULL; 1232 return NULL;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 88cd02626621..85acab9dc6fd 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -74,7 +74,7 @@ enum htb_cmode {
74struct htb_class { 74struct htb_class {
75 struct Qdisc_class_common common; 75 struct Qdisc_class_common common;
76 /* general class parameters */ 76 /* general class parameters */
77 struct gnet_stats_basic bstats; 77 struct gnet_stats_basic_packed bstats;
78 struct gnet_stats_queue qstats; 78 struct gnet_stats_queue qstats;
79 struct gnet_stats_rate_est rate_est; 79 struct gnet_stats_rate_est rate_est;
80 struct tc_htb_xstats xstats; /* our special stats */ 80 struct tc_htb_xstats xstats; /* our special stats */
@@ -1117,30 +1117,29 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1117{ 1117{
1118 struct htb_class *cl = (struct htb_class *)arg; 1118 struct htb_class *cl = (struct htb_class *)arg;
1119 1119
1120 if (cl && !cl->level) { 1120 if (cl->level)
1121 if (new == NULL && 1121 return -EINVAL;
1122 (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, 1122 if (new == NULL &&
1123 &pfifo_qdisc_ops, 1123 (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1124 cl->common.classid)) 1124 &pfifo_qdisc_ops,
1125 == NULL) 1125 cl->common.classid)) == NULL)
1126 return -ENOBUFS; 1126 return -ENOBUFS;
1127 sch_tree_lock(sch); 1127
1128 *old = cl->un.leaf.q; 1128 sch_tree_lock(sch);
1129 cl->un.leaf.q = new; 1129 *old = cl->un.leaf.q;
1130 if (*old != NULL) { 1130 cl->un.leaf.q = new;
1131 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); 1131 if (*old != NULL) {
1132 qdisc_reset(*old); 1132 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1133 } 1133 qdisc_reset(*old);
1134 sch_tree_unlock(sch);
1135 return 0;
1136 } 1134 }
1137 return -ENOENT; 1135 sch_tree_unlock(sch);
1136 return 0;
1138} 1137}
1139 1138
1140static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) 1139static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1141{ 1140{
1142 struct htb_class *cl = (struct htb_class *)arg; 1141 struct htb_class *cl = (struct htb_class *)arg;
1143 return (cl && !cl->level) ? cl->un.leaf.q : NULL; 1142 return !cl->level ? cl->un.leaf.q : NULL;
1144} 1143}
1145 1144
1146static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) 1145static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 4a2b77374358..a9e646bdb605 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -22,12 +22,6 @@ struct ingress_qdisc_data {
22 22
23/* ------------------------- Class/flow operations ------------------------- */ 23/* ------------------------- Class/flow operations ------------------------- */
24 24
25static int ingress_graft(struct Qdisc *sch, unsigned long arg,
26 struct Qdisc *new, struct Qdisc **old)
27{
28 return -EOPNOTSUPP;
29}
30
31static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) 25static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
32{ 26{
33 return NULL; 27 return NULL;
@@ -48,12 +42,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl)
48{ 42{
49} 43}
50 44
51static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent,
52 struct nlattr **tca, unsigned long *arg)
53{
54 return 0;
55}
56
57static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) 45static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
58{ 46{
59 return; 47 return;
@@ -123,11 +111,9 @@ nla_put_failure:
123} 111}
124 112
125static const struct Qdisc_class_ops ingress_class_ops = { 113static const struct Qdisc_class_ops ingress_class_ops = {
126 .graft = ingress_graft,
127 .leaf = ingress_leaf, 114 .leaf = ingress_leaf,
128 .get = ingress_get, 115 .get = ingress_get,
129 .put = ingress_put, 116 .put = ingress_put,
130 .change = ingress_change,
131 .walk = ingress_walk, 117 .walk = ingress_walk,
132 .tcf_chain = ingress_find_tcf, 118 .tcf_chain = ingress_find_tcf,
133 .bind_tcf = ingress_bind_filter, 119 .bind_tcf = ingress_bind_filter,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
new file mode 100644
index 000000000000..dd5ee022f1f7
--- /dev/null
+++ b/net/sched/sch_mq.c
@@ -0,0 +1,235 @@
1/*
2 * net/sched/sch_mq.c Classful multiqueue dummy scheduler
3 *
4 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/skbuff.h>
16#include <net/netlink.h>
17#include <net/pkt_sched.h>
18
19struct mq_sched {
20 struct Qdisc **qdiscs;
21};
22
23static void mq_destroy(struct Qdisc *sch)
24{
25 struct net_device *dev = qdisc_dev(sch);
26 struct mq_sched *priv = qdisc_priv(sch);
27 unsigned int ntx;
28
29 if (!priv->qdiscs)
30 return;
31 for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
32 qdisc_destroy(priv->qdiscs[ntx]);
33 kfree(priv->qdiscs);
34}
35
36static int mq_init(struct Qdisc *sch, struct nlattr *opt)
37{
38 struct net_device *dev = qdisc_dev(sch);
39 struct mq_sched *priv = qdisc_priv(sch);
40 struct netdev_queue *dev_queue;
41 struct Qdisc *qdisc;
42 unsigned int ntx;
43
44 if (sch->parent != TC_H_ROOT)
45 return -EOPNOTSUPP;
46
47 if (!netif_is_multiqueue(dev))
48 return -EOPNOTSUPP;
49
50 /* pre-allocate qdiscs, attachment can't fail */
51 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
52 GFP_KERNEL);
53 if (priv->qdiscs == NULL)
54 return -ENOMEM;
55
56 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
57 dev_queue = netdev_get_tx_queue(dev, ntx);
58 qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops,
59 TC_H_MAKE(TC_H_MAJ(sch->handle),
60 TC_H_MIN(ntx + 1)));
61 if (qdisc == NULL)
62 goto err;
63 qdisc->flags |= TCQ_F_CAN_BYPASS;
64 priv->qdiscs[ntx] = qdisc;
65 }
66
67 sch->flags |= TCQ_F_MQROOT;
68 return 0;
69
70err:
71 mq_destroy(sch);
72 return -ENOMEM;
73}
74
75static void mq_attach(struct Qdisc *sch)
76{
77 struct net_device *dev = qdisc_dev(sch);
78 struct mq_sched *priv = qdisc_priv(sch);
79 struct Qdisc *qdisc;
80 unsigned int ntx;
81
82 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
83 qdisc = priv->qdiscs[ntx];
84 qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
85 if (qdisc)
86 qdisc_destroy(qdisc);
87 }
88 kfree(priv->qdiscs);
89 priv->qdiscs = NULL;
90}
91
92static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
93{
94 struct net_device *dev = qdisc_dev(sch);
95 struct Qdisc *qdisc;
96 unsigned int ntx;
97
98 sch->q.qlen = 0;
99 memset(&sch->bstats, 0, sizeof(sch->bstats));
100 memset(&sch->qstats, 0, sizeof(sch->qstats));
101
102 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
103 qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
104 spin_lock_bh(qdisc_lock(qdisc));
105 sch->q.qlen += qdisc->q.qlen;
106 sch->bstats.bytes += qdisc->bstats.bytes;
107 sch->bstats.packets += qdisc->bstats.packets;
108 sch->qstats.qlen += qdisc->qstats.qlen;
109 sch->qstats.backlog += qdisc->qstats.backlog;
110 sch->qstats.drops += qdisc->qstats.drops;
111 sch->qstats.requeues += qdisc->qstats.requeues;
112 sch->qstats.overlimits += qdisc->qstats.overlimits;
113 spin_unlock_bh(qdisc_lock(qdisc));
114 }
115 return 0;
116}
117
118static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
119{
120 struct net_device *dev = qdisc_dev(sch);
121 unsigned long ntx = cl - 1;
122
123 if (ntx >= dev->num_tx_queues)
124 return NULL;
125 return netdev_get_tx_queue(dev, ntx);
126}
127
128static unsigned int mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
129{
130 unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
131
132 if (!mq_queue_get(sch, ntx))
133 return 0;
134 return ntx - 1;
135}
136
137static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
138 struct Qdisc **old)
139{
140 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
141 struct net_device *dev = qdisc_dev(sch);
142
143 if (dev->flags & IFF_UP)
144 dev_deactivate(dev);
145
146 *old = dev_graft_qdisc(dev_queue, new);
147
148 if (dev->flags & IFF_UP)
149 dev_activate(dev);
150 return 0;
151}
152
153static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
154{
155 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
156
157 return dev_queue->qdisc_sleeping;
158}
159
160static unsigned long mq_get(struct Qdisc *sch, u32 classid)
161{
162 unsigned int ntx = TC_H_MIN(classid);
163
164 if (!mq_queue_get(sch, ntx))
165 return 0;
166 return ntx;
167}
168
169static void mq_put(struct Qdisc *sch, unsigned long cl)
170{
171 return;
172}
173
174static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
175 struct sk_buff *skb, struct tcmsg *tcm)
176{
177 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
178
179 tcm->tcm_parent = TC_H_ROOT;
180 tcm->tcm_handle |= TC_H_MIN(cl);
181 tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
182 return 0;
183}
184
185static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
186 struct gnet_dump *d)
187{
188 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
189
190 sch = dev_queue->qdisc_sleeping;
191 if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
192 gnet_stats_copy_queue(d, &sch->qstats) < 0)
193 return -1;
194 return 0;
195}
196
197static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
198{
199 struct net_device *dev = qdisc_dev(sch);
200 unsigned int ntx;
201
202 if (arg->stop)
203 return;
204
205 arg->count = arg->skip;
206 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
207 if (arg->fn(sch, ntx + 1, arg) < 0) {
208 arg->stop = 1;
209 break;
210 }
211 arg->count++;
212 }
213}
214
215static const struct Qdisc_class_ops mq_class_ops = {
216 .select_queue = mq_select_queue,
217 .graft = mq_graft,
218 .leaf = mq_leaf,
219 .get = mq_get,
220 .put = mq_put,
221 .walk = mq_walk,
222 .dump = mq_dump_class,
223 .dump_stats = mq_dump_class_stats,
224};
225
226struct Qdisc_ops mq_qdisc_ops __read_mostly = {
227 .cl_ops = &mq_class_ops,
228 .id = "mq",
229 .priv_size = sizeof(struct mq_sched),
230 .init = mq_init,
231 .destroy = mq_destroy,
232 .attach = mq_attach,
233 .dump = mq_dump,
234 .owner = THIS_MODULE,
235};
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 912731203047..069f81c97277 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -298,9 +298,6 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
298 struct multiq_sched_data *q = qdisc_priv(sch); 298 struct multiq_sched_data *q = qdisc_priv(sch);
299 unsigned long band = arg - 1; 299 unsigned long band = arg - 1;
300 300
301 if (band >= q->bands)
302 return -EINVAL;
303
304 if (new == NULL) 301 if (new == NULL)
305 new = &noop_qdisc; 302 new = &noop_qdisc;
306 303
@@ -320,9 +317,6 @@ multiq_leaf(struct Qdisc *sch, unsigned long arg)
320 struct multiq_sched_data *q = qdisc_priv(sch); 317 struct multiq_sched_data *q = qdisc_priv(sch);
321 unsigned long band = arg - 1; 318 unsigned long band = arg - 1;
322 319
323 if (band >= q->bands)
324 return NULL;
325
326 return q->queues[band]; 320 return q->queues[band];
327} 321}
328 322
@@ -348,36 +342,13 @@ static void multiq_put(struct Qdisc *q, unsigned long cl)
348 return; 342 return;
349} 343}
350 344
351static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
352 struct nlattr **tca, unsigned long *arg)
353{
354 unsigned long cl = *arg;
355 struct multiq_sched_data *q = qdisc_priv(sch);
356
357 if (cl - 1 > q->bands)
358 return -ENOENT;
359 return 0;
360}
361
362static int multiq_delete(struct Qdisc *sch, unsigned long cl)
363{
364 struct multiq_sched_data *q = qdisc_priv(sch);
365 if (cl - 1 > q->bands)
366 return -ENOENT;
367 return 0;
368}
369
370
371static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, 345static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
372 struct sk_buff *skb, struct tcmsg *tcm) 346 struct sk_buff *skb, struct tcmsg *tcm)
373{ 347{
374 struct multiq_sched_data *q = qdisc_priv(sch); 348 struct multiq_sched_data *q = qdisc_priv(sch);
375 349
376 if (cl - 1 > q->bands)
377 return -ENOENT;
378 tcm->tcm_handle |= TC_H_MIN(cl); 350 tcm->tcm_handle |= TC_H_MIN(cl);
379 if (q->queues[cl-1]) 351 tcm->tcm_info = q->queues[cl-1]->handle;
380 tcm->tcm_info = q->queues[cl-1]->handle;
381 return 0; 352 return 0;
382} 353}
383 354
@@ -430,8 +401,6 @@ static const struct Qdisc_class_ops multiq_class_ops = {
430 .leaf = multiq_leaf, 401 .leaf = multiq_leaf,
431 .get = multiq_get, 402 .get = multiq_get,
432 .put = multiq_put, 403 .put = multiq_put,
433 .change = multiq_change,
434 .delete = multiq_delete,
435 .walk = multiq_walk, 404 .walk = multiq_walk,
436 .tcf_chain = multiq_find_tcf, 405 .tcf_chain = multiq_find_tcf,
437 .bind_tcf = multiq_bind, 406 .bind_tcf = multiq_bind,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 94cecef70145..0f73c412d04b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -262,9 +262,6 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
262 struct prio_sched_data *q = qdisc_priv(sch); 262 struct prio_sched_data *q = qdisc_priv(sch);
263 unsigned long band = arg - 1; 263 unsigned long band = arg - 1;
264 264
265 if (band >= q->bands)
266 return -EINVAL;
267
268 if (new == NULL) 265 if (new == NULL)
269 new = &noop_qdisc; 266 new = &noop_qdisc;
270 267
@@ -284,9 +281,6 @@ prio_leaf(struct Qdisc *sch, unsigned long arg)
284 struct prio_sched_data *q = qdisc_priv(sch); 281 struct prio_sched_data *q = qdisc_priv(sch);
285 unsigned long band = arg - 1; 282 unsigned long band = arg - 1;
286 283
287 if (band >= q->bands)
288 return NULL;
289
290 return q->queues[band]; 284 return q->queues[band];
291} 285}
292 286
@@ -311,35 +305,13 @@ static void prio_put(struct Qdisc *q, unsigned long cl)
311 return; 305 return;
312} 306}
313 307
314static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg)
315{
316 unsigned long cl = *arg;
317 struct prio_sched_data *q = qdisc_priv(sch);
318
319 if (cl - 1 > q->bands)
320 return -ENOENT;
321 return 0;
322}
323
324static int prio_delete(struct Qdisc *sch, unsigned long cl)
325{
326 struct prio_sched_data *q = qdisc_priv(sch);
327 if (cl - 1 > q->bands)
328 return -ENOENT;
329 return 0;
330}
331
332
333static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, 308static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
334 struct tcmsg *tcm) 309 struct tcmsg *tcm)
335{ 310{
336 struct prio_sched_data *q = qdisc_priv(sch); 311 struct prio_sched_data *q = qdisc_priv(sch);
337 312
338 if (cl - 1 > q->bands)
339 return -ENOENT;
340 tcm->tcm_handle |= TC_H_MIN(cl); 313 tcm->tcm_handle |= TC_H_MIN(cl);
341 if (q->queues[cl-1]) 314 tcm->tcm_info = q->queues[cl-1]->handle;
342 tcm->tcm_info = q->queues[cl-1]->handle;
343 return 0; 315 return 0;
344} 316}
345 317
@@ -392,8 +364,6 @@ static const struct Qdisc_class_ops prio_class_ops = {
392 .leaf = prio_leaf, 364 .leaf = prio_leaf,
393 .get = prio_get, 365 .get = prio_get,
394 .put = prio_put, 366 .put = prio_put,
395 .change = prio_change,
396 .delete = prio_delete,
397 .walk = prio_walk, 367 .walk = prio_walk,
398 .tcf_chain = prio_find_tcf, 368 .tcf_chain = prio_find_tcf,
399 .bind_tcf = prio_bind, 369 .bind_tcf = prio_bind,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 2bdf241f6315..072cdf442f8e 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -268,8 +268,6 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
268{ 268{
269 struct red_sched_data *q = qdisc_priv(sch); 269 struct red_sched_data *q = qdisc_priv(sch);
270 270
271 if (cl != 1)
272 return -ENOENT;
273 tcm->tcm_handle |= TC_H_MIN(1); 271 tcm->tcm_handle |= TC_H_MIN(1);
274 tcm->tcm_info = q->qdisc->handle; 272 tcm->tcm_info = q->qdisc->handle;
275 return 0; 273 return 0;
@@ -308,17 +306,6 @@ static void red_put(struct Qdisc *sch, unsigned long arg)
308 return; 306 return;
309} 307}
310 308
311static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
312 struct nlattr **tca, unsigned long *arg)
313{
314 return -ENOSYS;
315}
316
317static int red_delete(struct Qdisc *sch, unsigned long cl)
318{
319 return -ENOSYS;
320}
321
322static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) 309static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
323{ 310{
324 if (!walker->stop) { 311 if (!walker->stop) {
@@ -331,20 +318,12 @@ static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
331 } 318 }
332} 319}
333 320
334static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
335{
336 return NULL;
337}
338
339static const struct Qdisc_class_ops red_class_ops = { 321static const struct Qdisc_class_ops red_class_ops = {
340 .graft = red_graft, 322 .graft = red_graft,
341 .leaf = red_leaf, 323 .leaf = red_leaf,
342 .get = red_get, 324 .get = red_get,
343 .put = red_put, 325 .put = red_put,
344 .change = red_change_class,
345 .delete = red_delete,
346 .walk = red_walk, 326 .walk = red_walk,
347 .tcf_chain = red_find_tcf,
348 .dump = red_dump_class, 327 .dump = red_dump_class,
349}; 328};
350 329
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8706920a6d45..cb21380c0605 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -496,12 +496,6 @@ nla_put_failure:
496 return -1; 496 return -1;
497} 497}
498 498
499static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
500 struct nlattr **tca, unsigned long *arg)
501{
502 return -EOPNOTSUPP;
503}
504
505static unsigned long sfq_get(struct Qdisc *sch, u32 classid) 499static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
506{ 500{
507 return 0; 501 return 0;
@@ -560,7 +554,6 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
560 554
561static const struct Qdisc_class_ops sfq_class_ops = { 555static const struct Qdisc_class_ops sfq_class_ops = {
562 .get = sfq_get, 556 .get = sfq_get,
563 .change = sfq_change_class,
564 .tcf_chain = sfq_find_tcf, 557 .tcf_chain = sfq_find_tcf,
565 .dump = sfq_dump_class, 558 .dump = sfq_dump_class,
566 .dump_stats = sfq_dump_class_stats, 559 .dump_stats = sfq_dump_class_stats,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index e22dfe85e43e..8fb8107ab188 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -368,9 +368,6 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
368{ 368{
369 struct tbf_sched_data *q = qdisc_priv(sch); 369 struct tbf_sched_data *q = qdisc_priv(sch);
370 370
371 if (cl != 1) /* only one class */
372 return -ENOENT;
373
374 tcm->tcm_handle |= TC_H_MIN(1); 371 tcm->tcm_handle |= TC_H_MIN(1);
375 tcm->tcm_info = q->qdisc->handle; 372 tcm->tcm_info = q->qdisc->handle;
376 373
@@ -410,17 +407,6 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg)
410{ 407{
411} 408}
412 409
413static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
414 struct nlattr **tca, unsigned long *arg)
415{
416 return -ENOSYS;
417}
418
419static int tbf_delete(struct Qdisc *sch, unsigned long arg)
420{
421 return -ENOSYS;
422}
423
424static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) 410static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
425{ 411{
426 if (!walker->stop) { 412 if (!walker->stop) {
@@ -433,21 +419,13 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
433 } 419 }
434} 420}
435 421
436static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
437{
438 return NULL;
439}
440
441static const struct Qdisc_class_ops tbf_class_ops = 422static const struct Qdisc_class_ops tbf_class_ops =
442{ 423{
443 .graft = tbf_graft, 424 .graft = tbf_graft,
444 .leaf = tbf_leaf, 425 .leaf = tbf_leaf,
445 .get = tbf_get, 426 .get = tbf_get,
446 .put = tbf_put, 427 .put = tbf_put,
447 .change = tbf_change_class,
448 .delete = tbf_delete,
449 .walk = tbf_walk, 428 .walk = tbf_walk,
450 .tcf_chain = tbf_find_tcf,
451 .dump = tbf_dump_class, 429 .dump = tbf_dump_class,
452}; 430};
453 431
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 12434b6c2042..5a002c247231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -268,7 +268,7 @@ static inline int teql_resolve(struct sk_buff *skb,
268 return __teql_resolve(skb, skb_res, dev); 268 return __teql_resolve(skb, skb_res, dev);
269} 269}
270 270
271static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 271static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
272{ 272{
273 struct teql_master *master = netdev_priv(dev); 273 struct teql_master *master = netdev_priv(dev);
274 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 274 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
@@ -307,7 +307,7 @@ restart:
307 307
308 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
309 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
310 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
311 txq_trans_update(slave_txq); 311 txq_trans_update(slave_txq);
312 __netif_tx_unlock(slave_txq); 312 __netif_tx_unlock(slave_txq);
313 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 525864bf4f07..8450960df24f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -112,6 +112,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
112 asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) 112 asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
113 * 1000; 113 * 1000;
114 asoc->frag_point = 0; 114 asoc->frag_point = 0;
115 asoc->user_frag = sp->user_frag;
115 116
116 /* Set the association max_retrans and RTO values from the 117 /* Set the association max_retrans and RTO values from the
117 * socket values. 118 * socket values.
@@ -202,6 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
202 asoc->a_rwnd = asoc->rwnd; 203 asoc->a_rwnd = asoc->rwnd;
203 204
204 asoc->rwnd_over = 0; 205 asoc->rwnd_over = 0;
206 asoc->rwnd_press = 0;
205 207
206 /* Use my own max window until I learn something better. */ 208 /* Use my own max window until I learn something better. */
207 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; 209 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
@@ -582,6 +584,33 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
582 asoc->addip_last_asconf->transport == peer) 584 asoc->addip_last_asconf->transport == peer)
583 asoc->addip_last_asconf->transport = NULL; 585 asoc->addip_last_asconf->transport = NULL;
584 586
587 /* If we have something on the transmitted list, we have to
588 * save it off. The best place is the active path.
589 */
590 if (!list_empty(&peer->transmitted)) {
591 struct sctp_transport *active = asoc->peer.active_path;
592 struct sctp_chunk *ch;
593
594 /* Reset the transport of each chunk on this list */
595 list_for_each_entry(ch, &peer->transmitted,
596 transmitted_list) {
597 ch->transport = NULL;
598 ch->rtt_in_progress = 0;
599 }
600
601 list_splice_tail_init(&peer->transmitted,
602 &active->transmitted);
603
604 /* Start a T3 timer here in case it wasn't running so
605 * that these migrated packets have a chance to get
606 * retrnasmitted.
607 */
608 if (!timer_pending(&active->T3_rtx_timer))
609 if (!mod_timer(&active->T3_rtx_timer,
610 jiffies + active->rto))
611 sctp_transport_hold(active);
612 }
613
585 asoc->peer.transport_count--; 614 asoc->peer.transport_count--;
586 615
587 sctp_transport_free(peer); 616 sctp_transport_free(peer);
@@ -651,13 +680,15 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
651 */ 680 */
652 peer->param_flags = asoc->param_flags; 681 peer->param_flags = asoc->param_flags;
653 682
683 sctp_transport_route(peer, NULL, sp);
684
654 /* Initialize the pmtu of the transport. */ 685 /* Initialize the pmtu of the transport. */
655 if (peer->param_flags & SPP_PMTUD_ENABLE) 686 if (peer->param_flags & SPP_PMTUD_DISABLE) {
656 sctp_transport_pmtu(peer); 687 if (asoc->pathmtu)
657 else if (asoc->pathmtu) 688 peer->pathmtu = asoc->pathmtu;
658 peer->pathmtu = asoc->pathmtu; 689 else
659 else 690 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
660 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 691 }
661 692
662 /* If this is the first transport addr on this association, 693 /* If this is the first transport addr on this association,
663 * initialize the association PMTU to the peer's PMTU. 694 * initialize the association PMTU to the peer's PMTU.
@@ -673,7 +704,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
673 "%d\n", asoc, asoc->pathmtu); 704 "%d\n", asoc, asoc->pathmtu);
674 peer->pmtu_pending = 0; 705 peer->pmtu_pending = 0;
675 706
676 asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); 707 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
677 708
678 /* The asoc->peer.port might not be meaningful yet, but 709 /* The asoc->peer.port might not be meaningful yet, but
679 * initialize the packet structure anyway. 710 * initialize the packet structure anyway.
@@ -810,11 +841,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
810 break; 841 break;
811 842
812 case SCTP_TRANSPORT_DOWN: 843 case SCTP_TRANSPORT_DOWN:
813 /* if the transort was never confirmed, do not transition it 844 /* If the transport was never confirmed, do not transition it
814 * to inactive state. 845 * to inactive state. Also, release the cached route since
846 * there may be a better route next time.
815 */ 847 */
816 if (transport->state != SCTP_UNCONFIRMED) 848 if (transport->state != SCTP_UNCONFIRMED)
817 transport->state = SCTP_INACTIVE; 849 transport->state = SCTP_INACTIVE;
850 else {
851 dst_release(transport->dst);
852 transport->dst = NULL;
853 }
818 854
819 spc_state = SCTP_ADDR_UNREACHABLE; 855 spc_state = SCTP_ADDR_UNREACHABLE;
820 break; 856 break;
@@ -1324,9 +1360,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1324 } 1360 }
1325 1361
1326 if (pmtu) { 1362 if (pmtu) {
1327 struct sctp_sock *sp = sctp_sk(asoc->base.sk);
1328 asoc->pathmtu = pmtu; 1363 asoc->pathmtu = pmtu;
1329 asoc->frag_point = sctp_frag_point(sp, pmtu); 1364 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1330 } 1365 }
1331 1366
1332 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", 1367 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
@@ -1369,6 +1404,17 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1369 asoc->rwnd += len; 1404 asoc->rwnd += len;
1370 } 1405 }
1371 1406
1407 /* If we had window pressure, start recovering it
1408 * once our rwnd had reached the accumulated pressure
1409 * threshold. The idea is to recover slowly, but up
1410 * to the initial advertised window.
1411 */
1412 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1413 int change = min(asoc->pathmtu, asoc->rwnd_press);
1414 asoc->rwnd += change;
1415 asoc->rwnd_press -= change;
1416 }
1417
1372 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " 1418 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
1373 "- %u\n", __func__, asoc, len, asoc->rwnd, 1419 "- %u\n", __func__, asoc, len, asoc->rwnd,
1374 asoc->rwnd_over, asoc->a_rwnd); 1420 asoc->rwnd_over, asoc->a_rwnd);
@@ -1401,17 +1447,38 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1401/* Decrease asoc's rwnd by len. */ 1447/* Decrease asoc's rwnd by len. */
1402void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) 1448void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1403{ 1449{
1450 int rx_count;
1451 int over = 0;
1452
1404 SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); 1453 SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
1405 SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); 1454 SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
1455
1456 if (asoc->ep->rcvbuf_policy)
1457 rx_count = atomic_read(&asoc->rmem_alloc);
1458 else
1459 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1460
1461 /* If we've reached or overflowed our receive buffer, announce
1462 * a 0 rwnd if rwnd would still be positive. Store the
1463 * the pottential pressure overflow so that the window can be restored
1464 * back to original value.
1465 */
1466 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1467 over = 1;
1468
1406 if (asoc->rwnd >= len) { 1469 if (asoc->rwnd >= len) {
1407 asoc->rwnd -= len; 1470 asoc->rwnd -= len;
1471 if (over) {
1472 asoc->rwnd_press = asoc->rwnd;
1473 asoc->rwnd = 0;
1474 }
1408 } else { 1475 } else {
1409 asoc->rwnd_over = len - asoc->rwnd; 1476 asoc->rwnd_over = len - asoc->rwnd;
1410 asoc->rwnd = 0; 1477 asoc->rwnd = 0;
1411 } 1478 }
1412 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", 1479 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
1413 __func__, asoc, len, asoc->rwnd, 1480 __func__, asoc, len, asoc->rwnd,
1414 asoc->rwnd_over); 1481 asoc->rwnd_over, asoc->rwnd_press);
1415} 1482}
1416 1483
1417/* Build the bind address list for the association based on info from the 1484/* Build the bind address list for the association based on info from the
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 6d5944a745d4..13a6fba41077 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -510,9 +510,28 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
510 * of requested destination address, sender and receiver 510 * of requested destination address, sender and receiver
511 * SHOULD include all of its addresses with level greater 511 * SHOULD include all of its addresses with level greater
512 * than or equal to L. 512 * than or equal to L.
513 *
514 * Address scoping can be selectively controlled via sysctl
515 * option
513 */ 516 */
514 if (addr_scope <= scope) 517 switch (sctp_scope_policy) {
518 case SCTP_SCOPE_POLICY_DISABLE:
515 return 1; 519 return 1;
520 case SCTP_SCOPE_POLICY_ENABLE:
521 if (addr_scope <= scope)
522 return 1;
523 break;
524 case SCTP_SCOPE_POLICY_PRIVATE:
525 if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope)
526 return 1;
527 break;
528 case SCTP_SCOPE_POLICY_LINK:
529 if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope)
530 return 1;
531 break;
532 default:
533 break;
534 }
516 535
517 return 0; 536 return 0;
518} 537}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 1748ef90950c..acf7c4d128f7 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -59,6 +59,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
59 msg->can_abandon = 0; 59 msg->can_abandon = 0;
60 msg->expires_at = 0; 60 msg->expires_at = 0;
61 INIT_LIST_HEAD(&msg->chunks); 61 INIT_LIST_HEAD(&msg->chunks);
62 msg->msg_size = 0;
62} 63}
63 64
64/* Allocate and initialize datamsg. */ 65/* Allocate and initialize datamsg. */
@@ -73,6 +74,19 @@ SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
73 return msg; 74 return msg;
74} 75}
75 76
77void sctp_datamsg_free(struct sctp_datamsg *msg)
78{
79 struct sctp_chunk *chunk;
80
81 /* This doesn't have to be a _safe vairant because
82 * sctp_chunk_free() only drops the refs.
83 */
84 list_for_each_entry(chunk, &msg->chunks, frag_list)
85 sctp_chunk_free(chunk);
86
87 sctp_datamsg_put(msg);
88}
89
76/* Final destructruction of datamsg memory. */ 90/* Final destructruction of datamsg memory. */
77static void sctp_datamsg_destroy(struct sctp_datamsg *msg) 91static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
78{ 92{
@@ -142,6 +156,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
142{ 156{
143 sctp_datamsg_hold(msg); 157 sctp_datamsg_hold(msg);
144 chunk->msg = msg; 158 chunk->msg = msg;
159 msg->msg_size += chunk->skb->len;
145} 160}
146 161
147 162
@@ -158,6 +173,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
158{ 173{
159 int max, whole, i, offset, over, err; 174 int max, whole, i, offset, over, err;
160 int len, first_len; 175 int len, first_len;
176 int max_data;
161 struct sctp_chunk *chunk; 177 struct sctp_chunk *chunk;
162 struct sctp_datamsg *msg; 178 struct sctp_datamsg *msg;
163 struct list_head *pos, *temp; 179 struct list_head *pos, *temp;
@@ -179,8 +195,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
179 __func__, msg, msg->expires_at, jiffies); 195 __func__, msg, msg->expires_at, jiffies);
180 } 196 }
181 197
182 max = asoc->frag_point; 198 /* This is the biggest possible DATA chunk that can fit into
199 * the packet
200 */
201 max_data = asoc->pathmtu -
202 sctp_sk(asoc->base.sk)->pf->af->net_header_len -
203 sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
183 204
205 max = asoc->frag_point;
184 /* If the the peer requested that we authenticate DATA chunks 206 /* If the the peer requested that we authenticate DATA chunks
185 * we need to accound for bundling of the AUTH chunks along with 207 * we need to accound for bundling of the AUTH chunks along with
186 * DATA. 208 * DATA.
@@ -189,23 +211,41 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
189 struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); 211 struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
190 212
191 if (hmac_desc) 213 if (hmac_desc)
192 max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + 214 max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
193 hmac_desc->hmac_len); 215 hmac_desc->hmac_len);
194 } 216 }
195 217
218 /* Now, check if we need to reduce our max */
219 if (max > max_data)
220 max = max_data;
221
196 whole = 0; 222 whole = 0;
197 first_len = max; 223 first_len = max;
198 224
225 /* Check to see if we have a pending SACK and try to let it be bundled
226 * with this message. Do this if we don't have any data queued already.
227 * To check that, look at out_qlen and retransmit list.
228 * NOTE: we will not reduce to account for SACK, if the message would
229 * not have been fragmented.
230 */
231 if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
232 asoc->outqueue.out_qlen == 0 &&
233 list_empty(&asoc->outqueue.retransmit) &&
234 msg_len > max)
235 max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));
236
199 /* Encourage Cookie-ECHO bundling. */ 237 /* Encourage Cookie-ECHO bundling. */
200 if (asoc->state < SCTP_STATE_COOKIE_ECHOED) { 238 if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
201 whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN); 239 max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
202 240
203 /* Account for the DATA to be bundled with the COOKIE-ECHO. */ 241 /* Now that we adjusted completely, reset first_len */
204 if (whole) { 242 if (first_len > max_data)
205 first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN; 243 first_len = max_data;
206 msg_len -= first_len; 244
207 whole = 1; 245 /* Account for a different sized first fragment */
208 } 246 if (msg_len >= first_len) {
247 msg_len -= first_len;
248 whole = 1;
209 } 249 }
210 250
211 /* How many full sized? How many bytes leftover? */ 251 /* How many full sized? How many bytes leftover? */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b94c21190566..5cbda8f1ddfd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -61,8 +61,24 @@
61#include <net/sctp/checksum.h> 61#include <net/sctp/checksum.h>
62 62
63/* Forward declarations for private helpers. */ 63/* Forward declarations for private helpers. */
64static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, 64static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
65 struct sctp_chunk *chunk); 65 struct sctp_chunk *chunk);
66static void sctp_packet_append_data(struct sctp_packet *packet,
67 struct sctp_chunk *chunk);
68static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
69 struct sctp_chunk *chunk,
70 u16 chunk_len);
71
72static void sctp_packet_reset(struct sctp_packet *packet)
73{
74 packet->size = packet->overhead;
75 packet->has_cookie_echo = 0;
76 packet->has_sack = 0;
77 packet->has_data = 0;
78 packet->has_auth = 0;
79 packet->ipfragok = 0;
80 packet->auth = NULL;
81}
66 82
67/* Config a packet. 83/* Config a packet.
68 * This appears to be a followup set of initializations. 84 * This appears to be a followup set of initializations.
@@ -75,13 +91,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
75 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, 91 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
76 packet, vtag); 92 packet, vtag);
77 93
94 sctp_packet_reset(packet);
78 packet->vtag = vtag; 95 packet->vtag = vtag;
79 packet->has_cookie_echo = 0;
80 packet->has_sack = 0;
81 packet->has_auth = 0;
82 packet->has_data = 0;
83 packet->ipfragok = 0;
84 packet->auth = NULL;
85 96
86 if (ecn_capable && sctp_packet_empty(packet)) { 97 if (ecn_capable && sctp_packet_empty(packet)) {
87 chunk = sctp_get_ecne_prepend(packet->transport->asoc); 98 chunk = sctp_get_ecne_prepend(packet->transport->asoc);
@@ -119,15 +130,9 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
119 } 130 }
120 overhead += sizeof(struct sctphdr); 131 overhead += sizeof(struct sctphdr);
121 packet->overhead = overhead; 132 packet->overhead = overhead;
122 packet->size = overhead; 133 sctp_packet_reset(packet);
123 packet->vtag = 0; 134 packet->vtag = 0;
124 packet->has_cookie_echo = 0;
125 packet->has_sack = 0;
126 packet->has_auth = 0;
127 packet->has_data = 0;
128 packet->ipfragok = 0;
129 packet->malloced = 0; 135 packet->malloced = 0;
130 packet->auth = NULL;
131 return packet; 136 return packet;
132} 137}
133 138
@@ -204,7 +209,7 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
204 /* See if this is an auth chunk we are bundling or if 209 /* See if this is an auth chunk we are bundling or if
205 * auth is already bundled. 210 * auth is already bundled.
206 */ 211 */
207 if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth) 212 if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
208 return retval; 213 return retval;
209 214
210 /* if the peer did not request this chunk to be authenticated, 215 /* if the peer did not request this chunk to be authenticated,
@@ -234,18 +239,19 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
234 if (sctp_chunk_is_data(chunk) && !pkt->has_sack && 239 if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
235 !pkt->has_cookie_echo) { 240 !pkt->has_cookie_echo) {
236 struct sctp_association *asoc; 241 struct sctp_association *asoc;
242 struct timer_list *timer;
237 asoc = pkt->transport->asoc; 243 asoc = pkt->transport->asoc;
244 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
238 245
239 if (asoc->a_rwnd > asoc->rwnd) { 246 /* If the SACK timer is running, we have a pending SACK */
247 if (timer_pending(timer)) {
240 struct sctp_chunk *sack; 248 struct sctp_chunk *sack;
241 asoc->a_rwnd = asoc->rwnd; 249 asoc->a_rwnd = asoc->rwnd;
242 sack = sctp_make_sack(asoc); 250 sack = sctp_make_sack(asoc);
243 if (sack) { 251 if (sack) {
244 struct timer_list *timer;
245 retval = sctp_packet_append_chunk(pkt, sack); 252 retval = sctp_packet_append_chunk(pkt, sack);
246 asoc->peer.sack_needed = 0; 253 asoc->peer.sack_needed = 0;
247 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; 254 if (del_timer(timer))
248 if (timer_pending(timer) && del_timer(timer))
249 sctp_association_put(asoc); 255 sctp_association_put(asoc);
250 } 256 }
251 } 257 }
@@ -261,13 +267,20 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
261{ 267{
262 sctp_xmit_t retval = SCTP_XMIT_OK; 268 sctp_xmit_t retval = SCTP_XMIT_OK;
263 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); 269 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
264 size_t psize;
265 size_t pmtu;
266 int too_big;
267 270
268 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet, 271 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
269 chunk); 272 chunk);
270 273
274 /* Data chunks are special. Before seeing what else we can
275 * bundle into this packet, check to see if we are allowed to
276 * send this DATA.
277 */
278 if (sctp_chunk_is_data(chunk)) {
279 retval = sctp_packet_can_append_data(packet, chunk);
280 if (retval != SCTP_XMIT_OK)
281 goto finish;
282 }
283
271 /* Try to bundle AUTH chunk */ 284 /* Try to bundle AUTH chunk */
272 retval = sctp_packet_bundle_auth(packet, chunk); 285 retval = sctp_packet_bundle_auth(packet, chunk);
273 if (retval != SCTP_XMIT_OK) 286 if (retval != SCTP_XMIT_OK)
@@ -278,51 +291,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
278 if (retval != SCTP_XMIT_OK) 291 if (retval != SCTP_XMIT_OK)
279 goto finish; 292 goto finish;
280 293
281 psize = packet->size; 294 /* Check to see if this chunk will fit into the packet */
282 pmtu = ((packet->transport->asoc) ? 295 retval = sctp_packet_will_fit(packet, chunk, chunk_len);
283 (packet->transport->asoc->pathmtu) : 296 if (retval != SCTP_XMIT_OK)
284 (packet->transport->pathmtu)); 297 goto finish;
285
286 too_big = (psize + chunk_len > pmtu);
287
288 /* Decide if we need to fragment or resubmit later. */
289 if (too_big) {
290 /* It's OK to fragmet at IP level if any one of the following
291 * is true:
292 * 1. The packet is empty (meaning this chunk is greater
293 * the MTU)
294 * 2. The chunk we are adding is a control chunk
295 * 3. The packet doesn't have any data in it yet and data
296 * requires authentication.
297 */
298 if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
299 (!packet->has_data && chunk->auth)) {
300 /* We no longer do re-fragmentation.
301 * Just fragment at the IP layer, if we
302 * actually hit this condition
303 */
304 packet->ipfragok = 1;
305 goto append;
306
307 } else {
308 retval = SCTP_XMIT_PMTU_FULL;
309 goto finish;
310 }
311 }
312
313append:
314 /* We believe that this chunk is OK to add to the packet (as
315 * long as we have the cwnd for it).
316 */
317 298
318 /* DATA is a special case since we must examine both rwnd and cwnd 299 /* We believe that this chunk is OK to add to the packet */
319 * before we send DATA.
320 */
321 switch (chunk->chunk_hdr->type) { 300 switch (chunk->chunk_hdr->type) {
322 case SCTP_CID_DATA: 301 case SCTP_CID_DATA:
323 retval = sctp_packet_append_data(packet, chunk); 302 /* Account for the data being in the packet */
324 if (SCTP_XMIT_OK != retval) 303 sctp_packet_append_data(packet, chunk);
325 goto finish;
326 /* Disallow SACK bundling after DATA. */ 304 /* Disallow SACK bundling after DATA. */
327 packet->has_sack = 1; 305 packet->has_sack = 1;
328 /* Disallow AUTH bundling after DATA */ 306 /* Disallow AUTH bundling after DATA */
@@ -598,7 +576,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
598 (*tp->af_specific->sctp_xmit)(nskb, tp); 576 (*tp->af_specific->sctp_xmit)(nskb, tp);
599 577
600out: 578out:
601 packet->size = packet->overhead; 579 sctp_packet_reset(packet);
602 return err; 580 return err;
603no_route: 581no_route:
604 kfree_skb(nskb); 582 kfree_skb(nskb);
@@ -632,16 +610,15 @@ nomem:
632 * 2nd Level Abstractions 610 * 2nd Level Abstractions
633 ********************************************************************/ 611 ********************************************************************/
634 612
635/* This private function handles the specifics of appending DATA chunks. */ 613/* This private function check to see if a chunk can be added */
636static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, 614static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
637 struct sctp_chunk *chunk) 615 struct sctp_chunk *chunk)
638{ 616{
639 sctp_xmit_t retval = SCTP_XMIT_OK; 617 sctp_xmit_t retval = SCTP_XMIT_OK;
640 size_t datasize, rwnd, inflight; 618 size_t datasize, rwnd, inflight, flight_size;
641 struct sctp_transport *transport = packet->transport; 619 struct sctp_transport *transport = packet->transport;
642 __u32 max_burst_bytes; 620 __u32 max_burst_bytes;
643 struct sctp_association *asoc = transport->asoc; 621 struct sctp_association *asoc = transport->asoc;
644 struct sctp_sock *sp = sctp_sk(asoc->base.sk);
645 struct sctp_outq *q = &asoc->outqueue; 622 struct sctp_outq *q = &asoc->outqueue;
646 623
647 /* RFC 2960 6.1 Transmission of DATA Chunks 624 /* RFC 2960 6.1 Transmission of DATA Chunks
@@ -658,7 +635,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
658 */ 635 */
659 636
660 rwnd = asoc->peer.rwnd; 637 rwnd = asoc->peer.rwnd;
661 inflight = asoc->outqueue.outstanding_bytes; 638 inflight = q->outstanding_bytes;
639 flight_size = transport->flight_size;
662 640
663 datasize = sctp_data_size(chunk); 641 datasize = sctp_data_size(chunk);
664 642
@@ -681,8 +659,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
681 * cwnd = flightsize + Max.Burst * MTU 659 * cwnd = flightsize + Max.Burst * MTU
682 */ 660 */
683 max_burst_bytes = asoc->max_burst * asoc->pathmtu; 661 max_burst_bytes = asoc->max_burst * asoc->pathmtu;
684 if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { 662 if ((flight_size + max_burst_bytes) < transport->cwnd) {
685 transport->cwnd = transport->flight_size + max_burst_bytes; 663 transport->cwnd = flight_size + max_burst_bytes;
686 SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " 664 SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
687 "transport: %p, cwnd: %d, " 665 "transport: %p, cwnd: %d, "
688 "ssthresh: %d, flight_size: %d, " 666 "ssthresh: %d, flight_size: %d, "
@@ -707,7 +685,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
707 * ignore the value of cwnd and SHOULD NOT delay retransmission. 685 * ignore the value of cwnd and SHOULD NOT delay retransmission.
708 */ 686 */
709 if (chunk->fast_retransmit != SCTP_NEED_FRTX) 687 if (chunk->fast_retransmit != SCTP_NEED_FRTX)
710 if (transport->flight_size >= transport->cwnd) { 688 if (flight_size >= transport->cwnd) {
711 retval = SCTP_XMIT_RWND_FULL; 689 retval = SCTP_XMIT_RWND_FULL;
712 goto finish; 690 goto finish;
713 } 691 }
@@ -717,20 +695,36 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
717 * if any previously transmitted data on the connection remains 695 * if any previously transmitted data on the connection remains
718 * unacknowledged. 696 * unacknowledged.
719 */ 697 */
720 if (!sp->nodelay && sctp_packet_empty(packet) && 698 if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
721 q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) { 699 inflight && sctp_state(asoc, ESTABLISHED)) {
722 unsigned len = datasize + q->out_qlen; 700 unsigned max = transport->pathmtu - packet->overhead;
701 unsigned len = chunk->skb->len + q->out_qlen;
723 702
724 /* Check whether this chunk and all the rest of pending 703 /* Check whether this chunk and all the rest of pending
725 * data will fit or delay in hopes of bundling a full 704 * data will fit or delay in hopes of bundling a full
726 * sized packet. 705 * sized packet.
706 * Don't delay large message writes that may have been
707 * fragmeneted into small peices.
727 */ 708 */
728 if (len < asoc->frag_point) { 709 if ((len < max) && (chunk->msg->msg_size < max)) {
729 retval = SCTP_XMIT_NAGLE_DELAY; 710 retval = SCTP_XMIT_NAGLE_DELAY;
730 goto finish; 711 goto finish;
731 } 712 }
732 } 713 }
733 714
715finish:
716 return retval;
717}
718
719/* This private function does management things when adding DATA chunk */
720static void sctp_packet_append_data(struct sctp_packet *packet,
721 struct sctp_chunk *chunk)
722{
723 struct sctp_transport *transport = packet->transport;
724 size_t datasize = sctp_data_size(chunk);
725 struct sctp_association *asoc = transport->asoc;
726 u32 rwnd = asoc->peer.rwnd;
727
734 /* Keep track of how many bytes are in flight over this transport. */ 728 /* Keep track of how many bytes are in flight over this transport. */
735 transport->flight_size += datasize; 729 transport->flight_size += datasize;
736 730
@@ -753,7 +747,45 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
753 /* Has been accepted for transmission. */ 747 /* Has been accepted for transmission. */
754 if (!asoc->peer.prsctp_capable) 748 if (!asoc->peer.prsctp_capable)
755 chunk->msg->can_abandon = 0; 749 chunk->msg->can_abandon = 0;
750}
751
752static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
753 struct sctp_chunk *chunk,
754 u16 chunk_len)
755{
756 size_t psize;
757 size_t pmtu;
758 int too_big;
759 sctp_xmit_t retval = SCTP_XMIT_OK;
760
761 psize = packet->size;
762 pmtu = ((packet->transport->asoc) ?
763 (packet->transport->asoc->pathmtu) :
764 (packet->transport->pathmtu));
765
766 too_big = (psize + chunk_len > pmtu);
767
768 /* Decide if we need to fragment or resubmit later. */
769 if (too_big) {
770 /* It's OK to fragmet at IP level if any one of the following
771 * is true:
772 * 1. The packet is empty (meaning this chunk is greater
773 * the MTU)
774 * 2. The chunk we are adding is a control chunk
775 * 3. The packet doesn't have any data in it yet and data
776 * requires authentication.
777 */
778 if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
779 (!packet->has_data && chunk->auth)) {
780 /* We no longer do re-fragmentation.
781 * Just fragment at the IP layer, if we
782 * actually hit this condition
783 */
784 packet->ipfragok = 1;
785 } else {
786 retval = SCTP_XMIT_PMTU_FULL;
787 }
788 }
756 789
757finish:
758 return retval; 790 return retval;
759} 791}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d765fc53e74d..c9f20e28521b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,8 +406,9 @@ void sctp_retransmit_mark(struct sctp_outq *q,
406 * not be retransmitted 406 * not be retransmitted
407 */ 407 */
408 if (!chunk->tsn_gap_acked) { 408 if (!chunk->tsn_gap_acked) {
409 chunk->transport->flight_size -= 409 if (chunk->transport)
410 sctp_data_size(chunk); 410 chunk->transport->flight_size -=
411 sctp_data_size(chunk);
411 q->outstanding_bytes -= sctp_data_size(chunk); 412 q->outstanding_bytes -= sctp_data_size(chunk);
412 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 413 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
413 sizeof(struct sk_buff)); 414 sizeof(struct sk_buff));
@@ -443,7 +444,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
443 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 444 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
444 sizeof(struct sk_buff)); 445 sizeof(struct sk_buff));
445 q->outstanding_bytes -= sctp_data_size(chunk); 446 q->outstanding_bytes -= sctp_data_size(chunk);
446 transport->flight_size -= sctp_data_size(chunk); 447 if (chunk->transport)
448 transport->flight_size -= sctp_data_size(chunk);
447 449
448 /* sctpimpguide-05 Section 2.8.2 450 /* sctpimpguide-05 Section 2.8.2
449 * M5) If a T3-rtx timer expires, the 451 * M5) If a T3-rtx timer expires, the
@@ -1310,6 +1312,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1310 __u32 rtt; 1312 __u32 rtt;
1311 __u8 restart_timer = 0; 1313 __u8 restart_timer = 0;
1312 int bytes_acked = 0; 1314 int bytes_acked = 0;
1315 int migrate_bytes = 0;
1313 1316
1314 /* These state variables are for coherent debug output. --xguo */ 1317 /* These state variables are for coherent debug output. --xguo */
1315 1318
@@ -1343,8 +1346,9 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1343 * considering it as 'outstanding'. 1346 * considering it as 'outstanding'.
1344 */ 1347 */
1345 if (!tchunk->tsn_gap_acked) { 1348 if (!tchunk->tsn_gap_acked) {
1346 tchunk->transport->flight_size -= 1349 if (tchunk->transport)
1347 sctp_data_size(tchunk); 1350 tchunk->transport->flight_size -=
1351 sctp_data_size(tchunk);
1348 q->outstanding_bytes -= sctp_data_size(tchunk); 1352 q->outstanding_bytes -= sctp_data_size(tchunk);
1349 } 1353 }
1350 continue; 1354 continue;
@@ -1378,6 +1382,20 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1378 rtt); 1382 rtt);
1379 } 1383 }
1380 } 1384 }
1385
1386 /* If the chunk hasn't been marked as ACKED,
1387 * mark it and account bytes_acked if the
1388 * chunk had a valid transport (it will not
1389 * have a transport if ASCONF had deleted it
1390 * while DATA was outstanding).
1391 */
1392 if (!tchunk->tsn_gap_acked) {
1393 tchunk->tsn_gap_acked = 1;
1394 bytes_acked += sctp_data_size(tchunk);
1395 if (!tchunk->transport)
1396 migrate_bytes += sctp_data_size(tchunk);
1397 }
1398
1381 if (TSN_lte(tsn, sack_ctsn)) { 1399 if (TSN_lte(tsn, sack_ctsn)) {
1382 /* RFC 2960 6.3.2 Retransmission Timer Rules 1400 /* RFC 2960 6.3.2 Retransmission Timer Rules
1383 * 1401 *
@@ -1391,8 +1409,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1391 restart_timer = 1; 1409 restart_timer = 1;
1392 1410
1393 if (!tchunk->tsn_gap_acked) { 1411 if (!tchunk->tsn_gap_acked) {
1394 tchunk->tsn_gap_acked = 1;
1395 bytes_acked += sctp_data_size(tchunk);
1396 /* 1412 /*
1397 * SFR-CACC algorithm: 1413 * SFR-CACC algorithm:
1398 * 2) If the SACK contains gap acks 1414 * 2) If the SACK contains gap acks
@@ -1432,10 +1448,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1432 * older than that newly acknowledged DATA 1448 * older than that newly acknowledged DATA
1433 * chunk, are qualified as 'Stray DATA chunks'. 1449 * chunk, are qualified as 'Stray DATA chunks'.
1434 */ 1450 */
1435 if (!tchunk->tsn_gap_acked) {
1436 tchunk->tsn_gap_acked = 1;
1437 bytes_acked += sctp_data_size(tchunk);
1438 }
1439 list_add_tail(lchunk, &tlist); 1451 list_add_tail(lchunk, &tlist);
1440 } 1452 }
1441 1453
@@ -1491,7 +1503,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1491 tsn); 1503 tsn);
1492 tchunk->tsn_gap_acked = 0; 1504 tchunk->tsn_gap_acked = 0;
1493 1505
1494 bytes_acked -= sctp_data_size(tchunk); 1506 if (tchunk->transport)
1507 bytes_acked -= sctp_data_size(tchunk);
1495 1508
1496 /* RFC 2960 6.3.2 Retransmission Timer Rules 1509 /* RFC 2960 6.3.2 Retransmission Timer Rules
1497 * 1510 *
@@ -1561,6 +1574,14 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1561#endif /* SCTP_DEBUG */ 1574#endif /* SCTP_DEBUG */
1562 if (transport) { 1575 if (transport) {
1563 if (bytes_acked) { 1576 if (bytes_acked) {
1577 /* We may have counted DATA that was migrated
1578 * to this transport due to DEL-IP operation.
1579 * Subtract those bytes, since the were never
1580 * send on this transport and shouldn't be
1581 * credited to this transport.
1582 */
1583 bytes_acked -= migrate_bytes;
1584
1564 /* 8.2. When an outstanding TSN is acknowledged, 1585 /* 8.2. When an outstanding TSN is acknowledged,
1565 * the endpoint shall clear the error counter of 1586 * the endpoint shall clear the error counter of
1566 * the destination transport address to which the 1587 * the destination transport address to which the
@@ -1589,7 +1610,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1589 transport->flight_size -= bytes_acked; 1610 transport->flight_size -= bytes_acked;
1590 if (transport->flight_size == 0) 1611 if (transport->flight_size == 0)
1591 transport->partial_bytes_acked = 0; 1612 transport->partial_bytes_acked = 0;
1592 q->outstanding_bytes -= bytes_acked; 1613 q->outstanding_bytes -= bytes_acked + migrate_bytes;
1593 } else { 1614 } else {
1594 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1615 /* RFC 2960 6.1, sctpimpguide-06 2.15.2
1595 * When a sender is doing zero window probing, it 1616 * When a sender is doing zero window probing, it
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f268910620be..d093cbfeaac4 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -512,10 +512,8 @@ int __init sctp_remaddr_proc_init(void)
512{ 512{
513 struct proc_dir_entry *p; 513 struct proc_dir_entry *p;
514 514
515 p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp); 515 p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
516 if (!p) 516 if (!p)
517 return -ENOMEM; 517 return -ENOMEM;
518 p->proc_fops = &sctp_remaddr_seq_fops;
519
520 return 0; 518 return 0;
521} 519}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 79cbd47f4df7..60093be8385d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -160,6 +160,7 @@ static void sctp_proc_exit(void)
160 remove_proc_entry("sctp", init_net.proc_net); 160 remove_proc_entry("sctp", init_net.proc_net);
161 } 161 }
162#endif 162#endif
163 percpu_counter_destroy(&sctp_sockets_allocated);
163} 164}
164 165
165/* Private helper to extract ipv4 address and stash them in 166/* Private helper to extract ipv4 address and stash them in
@@ -430,16 +431,14 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
430 * of requested destination address, sender and receiver 431 * of requested destination address, sender and receiver
431 * SHOULD include all of its addresses with level greater 432 * SHOULD include all of its addresses with level greater
432 * than or equal to L. 433 * than or equal to L.
434 *
435 * IPv4 scoping can be controlled through sysctl option
436 * net.sctp.addr_scope_policy
433 */ 437 */
434static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) 438static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
435{ 439{
436 sctp_scope_t retval; 440 sctp_scope_t retval;
437 441
438 /* Should IPv4 scoping be a sysctl configurable option
439 * so users can turn it off (default on) for certain
440 * unconventional networking environments?
441 */
442
443 /* Check for unusable SCTP addresses. */ 442 /* Check for unusable SCTP addresses. */
444 if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) { 443 if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
445 retval = SCTP_SCOPE_UNUSABLE; 444 retval = SCTP_SCOPE_UNUSABLE;
@@ -1258,6 +1257,9 @@ SCTP_STATIC __init int sctp_init(void)
1258 /* Disable AUTH by default. */ 1257 /* Disable AUTH by default. */
1259 sctp_auth_enable = 0; 1258 sctp_auth_enable = 0;
1260 1259
1260 /* Set SCOPE policy to enabled */
1261 sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
1262
1261 sctp_sysctl_register(); 1263 sctp_sysctl_register();
1262 1264
1263 INIT_LIST_HEAD(&sctp_address_families); 1265 INIT_LIST_HEAD(&sctp_address_families);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 61cc6075b0df..9d881a61ac02 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2861,6 +2861,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2861 addr_param = (union sctp_addr_param *) 2861 addr_param = (union sctp_addr_param *)
2862 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 2862 ((void *)asconf_param + sizeof(sctp_addip_param_t));
2863 2863
2864 if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
2865 asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
2866 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
2867 return SCTP_ERROR_UNKNOWN_PARAM;
2868
2864 switch (addr_param->v4.param_hdr.type) { 2869 switch (addr_param->v4.param_hdr.type) {
2865 case SCTP_PARAM_IPV6_ADDRESS: 2870 case SCTP_PARAM_IPV6_ADDRESS:
2866 if (!asoc->peer.ipv6_address) 2871 if (!asoc->peer.ipv6_address)
@@ -2958,9 +2963,6 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2958 2963
2959 sctp_assoc_set_primary(asoc, peer); 2964 sctp_assoc_set_primary(asoc, peer);
2960 break; 2965 break;
2961 default:
2962 return SCTP_ERROR_UNKNOWN_PARAM;
2963 break;
2964 } 2966 }
2965 2967
2966 return SCTP_ERROR_NO_ERROR; 2968 return SCTP_ERROR_NO_ERROR;
@@ -3104,7 +3106,7 @@ done:
3104} 3106}
3105 3107
3106/* Process a asconf parameter that is successfully acked. */ 3108/* Process a asconf parameter that is successfully acked. */
3107static int sctp_asconf_param_success(struct sctp_association *asoc, 3109static void sctp_asconf_param_success(struct sctp_association *asoc,
3108 sctp_addip_param_t *asconf_param) 3110 sctp_addip_param_t *asconf_param)
3109{ 3111{
3110 struct sctp_af *af; 3112 struct sctp_af *af;
@@ -3113,7 +3115,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
3113 union sctp_addr_param *addr_param; 3115 union sctp_addr_param *addr_param;
3114 struct sctp_transport *transport; 3116 struct sctp_transport *transport;
3115 struct sctp_sockaddr_entry *saddr; 3117 struct sctp_sockaddr_entry *saddr;
3116 int retval = 0;
3117 3118
3118 addr_param = (union sctp_addr_param *) 3119 addr_param = (union sctp_addr_param *)
3119 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 3120 ((void *)asconf_param + sizeof(sctp_addip_param_t));
@@ -3133,10 +3134,18 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
3133 saddr->state = SCTP_ADDR_SRC; 3134 saddr->state = SCTP_ADDR_SRC;
3134 } 3135 }
3135 local_bh_enable(); 3136 local_bh_enable();
3137 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3138 transports) {
3139 if (transport->state == SCTP_ACTIVE)
3140 continue;
3141 dst_release(transport->dst);
3142 sctp_transport_route(transport, NULL,
3143 sctp_sk(asoc->base.sk));
3144 }
3136 break; 3145 break;
3137 case SCTP_PARAM_DEL_IP: 3146 case SCTP_PARAM_DEL_IP:
3138 local_bh_disable(); 3147 local_bh_disable();
3139 retval = sctp_del_bind_addr(bp, &addr); 3148 sctp_del_bind_addr(bp, &addr);
3140 local_bh_enable(); 3149 local_bh_enable();
3141 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3150 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3142 transports) { 3151 transports) {
@@ -3148,8 +3157,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
3148 default: 3157 default:
3149 break; 3158 break;
3150 } 3159 }
3151
3152 return retval;
3153} 3160}
3154 3161
3155/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk 3162/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
@@ -3266,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3266 3273
3267 switch (err_code) { 3274 switch (err_code) {
3268 case SCTP_ERROR_NO_ERROR: 3275 case SCTP_ERROR_NO_ERROR:
3269 retval = sctp_asconf_param_success(asoc, asconf_param); 3276 sctp_asconf_param_success(asoc, asconf_param);
3270 break; 3277 break;
3271 3278
3272 case SCTP_ERROR_RSRC_LOW: 3279 case SCTP_ERROR_RSRC_LOW:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 86426aac1600..8674d4919556 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -440,14 +440,26 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
440 /* The check for association's overall error counter exceeding the 440 /* The check for association's overall error counter exceeding the
441 * threshold is done in the state function. 441 * threshold is done in the state function.
442 */ 442 */
443 /* When probing UNCONFIRMED addresses, the association overall 443 /* We are here due to a timer expiration. If the timer was
444 * error count is NOT incremented 444 * not a HEARTBEAT, then normal error tracking is done.
445 * If the timer was a heartbeat, we only increment error counts
446 * when we already have an outstanding HEARTBEAT that has not
447 * been acknowledged.
448 * Additionaly, some tranport states inhibit error increments.
445 */ 449 */
446 if (transport->state != SCTP_UNCONFIRMED) 450 if (!is_hb) {
447 asoc->overall_error_count++; 451 asoc->overall_error_count++;
452 if (transport->state != SCTP_INACTIVE)
453 transport->error_count++;
454 } else if (transport->hb_sent) {
455 if (transport->state != SCTP_UNCONFIRMED)
456 asoc->overall_error_count++;
457 if (transport->state != SCTP_INACTIVE)
458 transport->error_count++;
459 }
448 460
449 if (transport->state != SCTP_INACTIVE && 461 if (transport->state != SCTP_INACTIVE &&
450 (transport->error_count++ >= transport->pathmaxrxt)) { 462 (transport->error_count > transport->pathmaxrxt)) {
451 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", 463 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
452 " transport IP: port:%d failed.\n", 464 " transport IP: port:%d failed.\n",
453 asoc, 465 asoc,
@@ -931,6 +943,27 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
931 943
932} 944}
933 945
946/* Send the whole message, chunk by chunk, to the outqueue.
947 * This way the whole message is queued up and bundling if
948 * encouraged for small fragments.
949 */
950static int sctp_cmd_send_msg(struct sctp_association *asoc,
951 struct sctp_datamsg *msg)
952{
953 struct sctp_chunk *chunk;
954 int error = 0;
955
956 list_for_each_entry(chunk, &msg->chunks, frag_list) {
957 error = sctp_outq_tail(&asoc->outqueue, chunk);
958 if (error)
959 break;
960 }
961
962 return error;
963}
964
965
966
934/* These three macros allow us to pull the debugging code out of the 967/* These three macros allow us to pull the debugging code out of the
935 * main flow of sctp_do_sm() to keep attention focused on the real 968 * main flow of sctp_do_sm() to keep attention focused on the real
936 * functionality there. 969 * functionality there.
@@ -1500,7 +1533,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1500 case SCTP_CMD_PROCESS_CTSN: 1533 case SCTP_CMD_PROCESS_CTSN:
1501 /* Dummy up a SACK for processing. */ 1534 /* Dummy up a SACK for processing. */
1502 sackh.cum_tsn_ack = cmd->obj.be32; 1535 sackh.cum_tsn_ack = cmd->obj.be32;
1503 sackh.a_rwnd = 0; 1536 sackh.a_rwnd = asoc->peer.rwnd +
1537 asoc->outqueue.outstanding_bytes;
1504 sackh.num_gap_ack_blocks = 0; 1538 sackh.num_gap_ack_blocks = 0;
1505 sackh.num_dup_tsns = 0; 1539 sackh.num_dup_tsns = 0;
1506 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, 1540 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
@@ -1575,7 +1609,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1575 case SCTP_CMD_UPDATE_INITTAG: 1609 case SCTP_CMD_UPDATE_INITTAG:
1576 asoc->peer.i.init_tag = cmd->obj.u32; 1610 asoc->peer.i.init_tag = cmd->obj.u32;
1577 break; 1611 break;
1578 1612 case SCTP_CMD_SEND_MSG:
1613 if (!asoc->outqueue.cork) {
1614 sctp_outq_cork(&asoc->outqueue);
1615 local_cork = 1;
1616 }
1617 error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1618 break;
1579 default: 1619 default:
1580 printk(KERN_WARNING "Impossible command: %u, %p\n", 1620 printk(KERN_WARNING "Impossible command: %u, %p\n",
1581 cmd->verb, cmd->obj.ptr); 1621 cmd->verb, cmd->obj.ptr);
@@ -1593,9 +1633,9 @@ out:
1593 */ 1633 */
1594 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { 1634 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1595 if (chunk->end_of_packet || chunk->singleton) 1635 if (chunk->end_of_packet || chunk->singleton)
1596 sctp_outq_uncork(&asoc->outqueue); 1636 error = sctp_outq_uncork(&asoc->outqueue);
1597 } else if (local_cork) 1637 } else if (local_cork)
1598 sctp_outq_uncork(&asoc->outqueue); 1638 error = sctp_outq_uncork(&asoc->outqueue);
1599 return error; 1639 return error;
1600nomem: 1640nomem:
1601 error = -ENOMEM; 1641 error = -ENOMEM;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7288192f7df5..c8fae1983dd1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
334 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) 334 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
335 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 335 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
336 336
337 /* If the INIT is coming toward a closing socket, we'll send back
338 * and ABORT. Essentially, this catches the race of INIT being
339 * backloged to the socket at the same time as the user isses close().
340 * Since the socket and all its associations are going away, we
341 * can treat this OOTB
342 */
343 if (sctp_sstate(ep->base.sk, CLOSING))
344 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
345
337 /* Verify the INIT chunk before processing it. */ 346 /* Verify the INIT chunk before processing it. */
338 err_chunk = NULL; 347 err_chunk = NULL;
339 if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, 348 if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
@@ -962,7 +971,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
962{ 971{
963 struct sctp_transport *transport = (struct sctp_transport *) arg; 972 struct sctp_transport *transport = (struct sctp_transport *) arg;
964 973
965 if (asoc->overall_error_count > asoc->max_retrans) { 974 if (asoc->overall_error_count >= asoc->max_retrans) {
966 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 975 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
967 SCTP_ERROR(ETIMEDOUT)); 976 SCTP_ERROR(ETIMEDOUT));
968 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 977 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
@@ -1106,7 +1115,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1106 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 1115 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
1107 1116
1108 /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ 1117 /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
1109 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) 1118 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
1119 sizeof(sctp_sender_hb_info_t)))
1110 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 1120 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
1111 commands); 1121 commands);
1112 1122
@@ -2561,6 +2571,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2561 chunk->subh.shutdown_hdr = sdh; 2571 chunk->subh.shutdown_hdr = sdh;
2562 ctsn = ntohl(sdh->cum_tsn_ack); 2572 ctsn = ntohl(sdh->cum_tsn_ack);
2563 2573
2574 if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
2575 SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
2576 SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
2577 return SCTP_DISPOSITION_DISCARD;
2578 }
2579
2564 /* If Cumulative TSN Ack beyond the max tsn currently 2580 /* If Cumulative TSN Ack beyond the max tsn currently
2565 * send, terminating the association and respond to the 2581 * send, terminating the association and respond to the
2566 * sender with an ABORT. 2582 * sender with an ABORT.
@@ -2624,6 +2640,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
2624{ 2640{
2625 struct sctp_chunk *chunk = arg; 2641 struct sctp_chunk *chunk = arg;
2626 sctp_shutdownhdr_t *sdh; 2642 sctp_shutdownhdr_t *sdh;
2643 __u32 ctsn;
2627 2644
2628 if (!sctp_vtag_verify(chunk, asoc)) 2645 if (!sctp_vtag_verify(chunk, asoc))
2629 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2646 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2635,12 +2652,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
2635 commands); 2652 commands);
2636 2653
2637 sdh = (sctp_shutdownhdr_t *)chunk->skb->data; 2654 sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
2655 ctsn = ntohl(sdh->cum_tsn_ack);
2656
2657 if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
2658 SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
2659 SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
2660 return SCTP_DISPOSITION_DISCARD;
2661 }
2638 2662
2639 /* If Cumulative TSN Ack beyond the max tsn currently 2663 /* If Cumulative TSN Ack beyond the max tsn currently
2640 * send, terminating the association and respond to the 2664 * send, terminating the association and respond to the
2641 * sender with an ABORT. 2665 * sender with an ABORT.
2642 */ 2666 */
2643 if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn)) 2667 if (!TSN_lt(ctsn, asoc->next_tsn))
2644 return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); 2668 return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
2645 2669
2646 /* verify, by checking the Cumulative TSN Ack field of the 2670 /* verify, by checking the Cumulative TSN Ack field of the
@@ -2867,6 +2891,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2867 goto discard_force; 2891 goto discard_force;
2868 case SCTP_IERROR_NO_DATA: 2892 case SCTP_IERROR_NO_DATA:
2869 goto consume; 2893 goto consume;
2894 case SCTP_IERROR_PROTO_VIOLATION:
2895 return sctp_sf_abort_violation(ep, asoc, chunk, commands,
2896 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
2870 default: 2897 default:
2871 BUG(); 2898 BUG();
2872 } 2899 }
@@ -2977,6 +3004,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
2977 break; 3004 break;
2978 case SCTP_IERROR_NO_DATA: 3005 case SCTP_IERROR_NO_DATA:
2979 goto consume; 3006 goto consume;
3007 case SCTP_IERROR_PROTO_VIOLATION:
3008 return sctp_sf_abort_violation(ep, asoc, chunk, commands,
3009 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
2980 default: 3010 default:
2981 BUG(); 3011 BUG();
2982 } 3012 }
@@ -3519,6 +3549,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3519 asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); 3549 asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
3520 if (!asconf_ack) 3550 if (!asconf_ack)
3521 return SCTP_DISPOSITION_DISCARD; 3551 return SCTP_DISPOSITION_DISCARD;
3552
3553 /* Reset the transport so that we select the correct one
3554 * this time around. This is to make sure that we don't
3555 * accidentally use a stale transport that's been removed.
3556 */
3557 asconf_ack->transport = NULL;
3522 } else { 3558 } else {
3523 /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since 3559 /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
3524 * it must be either a stale packet or from an attacker. 3560 * it must be either a stale packet or from an attacker.
@@ -4546,9 +4582,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
4546 void *arg, 4582 void *arg,
4547 sctp_cmd_seq_t *commands) 4583 sctp_cmd_seq_t *commands)
4548{ 4584{
4549 struct sctp_chunk *chunk = arg; 4585 struct sctp_datamsg *msg = arg;
4550 4586
4551 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); 4587 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
4552 return SCTP_DISPOSITION_CONSUME; 4588 return SCTP_DISPOSITION_CONSUME;
4553} 4589}
4554 4590
@@ -5847,6 +5883,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5847 __u32 tsn; 5883 __u32 tsn;
5848 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; 5884 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
5849 struct sock *sk = asoc->base.sk; 5885 struct sock *sk = asoc->base.sk;
5886 u16 ssn;
5887 u16 sid;
5888 u8 ordered = 0;
5850 5889
5851 data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; 5890 data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
5852 skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); 5891 skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5986,8 +6025,10 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5986 */ 6025 */
5987 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 6026 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
5988 SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); 6027 SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
5989 else 6028 else {
5990 SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); 6029 SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
6030 ordered = 1;
6031 }
5991 6032
5992 /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number 6033 /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
5993 * 6034 *
@@ -5997,7 +6038,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5997 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) 6038 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
5998 * and discard the DATA chunk. 6039 * and discard the DATA chunk.
5999 */ 6040 */
6000 if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { 6041 sid = ntohs(data_hdr->stream);
6042 if (sid >= asoc->c.sinit_max_instreams) {
6001 /* Mark tsn as received even though we drop it */ 6043 /* Mark tsn as received even though we drop it */
6002 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 6044 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
6003 6045
@@ -6010,6 +6052,18 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6010 return SCTP_IERROR_BAD_STREAM; 6052 return SCTP_IERROR_BAD_STREAM;
6011 } 6053 }
6012 6054
6055 /* Check to see if the SSN is possible for this TSN.
6056 * The biggest gap we can record is 4K wide. Since SSNs wrap
6057 * at an unsigned short, there is no way that an SSN can
6058 * wrap and for a valid TSN. We can simply check if the current
6059 * SSN is smaller then the next expected one. If it is, it wrapped
6060 * and is invalid.
6061 */
6062 ssn = ntohs(data_hdr->ssn);
6063 if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
6064 return SCTP_IERROR_PROTO_VIOLATION;
6065 }
6066
6013 /* Send the data up to the user. Note: Schedule the 6067 /* Send the data up to the user. Note: Schedule the
6014 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK 6068 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
6015 * chunk needs the updated rwnd. 6069 * chunk needs the updated rwnd.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 971890dbfea0..89af37a6c871 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1361,6 +1361,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1361 1361
1362 sctp_lock_sock(sk); 1362 sctp_lock_sock(sk);
1363 sk->sk_shutdown = SHUTDOWN_MASK; 1363 sk->sk_shutdown = SHUTDOWN_MASK;
1364 sk->sk_state = SCTP_SS_CLOSING;
1364 1365
1365 ep = sctp_sk(sk)->ep; 1366 ep = sctp_sk(sk)->ep;
1366 1367
@@ -1813,20 +1814,22 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1813 sctp_set_owner_w(chunk); 1814 sctp_set_owner_w(chunk);
1814 1815
1815 chunk->transport = chunk_tp; 1816 chunk->transport = chunk_tp;
1816
1817 /* Send it to the lower layers. Note: all chunks
1818 * must either fail or succeed. The lower layer
1819 * works that way today. Keep it that way or this
1820 * breaks.
1821 */
1822 err = sctp_primitive_SEND(asoc, chunk);
1823 /* Did the lower layer accept the chunk? */
1824 if (err)
1825 sctp_chunk_free(chunk);
1826 SCTP_DEBUG_PRINTK("We sent primitively.\n");
1827 } 1817 }
1828 1818
1829 sctp_datamsg_put(datamsg); 1819 /* Send it to the lower layers. Note: all chunks
1820 * must either fail or succeed. The lower layer
1821 * works that way today. Keep it that way or this
1822 * breaks.
1823 */
1824 err = sctp_primitive_SEND(asoc, datamsg);
1825 /* Did the lower layer accept the chunk? */
1826 if (err)
1827 sctp_datamsg_free(datamsg);
1828 else
1829 sctp_datamsg_put(datamsg);
1830
1831 SCTP_DEBUG_PRINTK("We sent primitively.\n");
1832
1830 if (err) 1833 if (err)
1831 goto out_free; 1834 goto out_free;
1832 else 1835 else
@@ -2240,7 +2243,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2240 sctp_assoc_sync_pmtu(asoc); 2243 sctp_assoc_sync_pmtu(asoc);
2241 } else if (asoc) { 2244 } else if (asoc) {
2242 asoc->pathmtu = params->spp_pathmtu; 2245 asoc->pathmtu = params->spp_pathmtu;
2243 sctp_frag_point(sp, params->spp_pathmtu); 2246 sctp_frag_point(asoc, params->spp_pathmtu);
2244 } else { 2247 } else {
2245 sp->pathmtu = params->spp_pathmtu; 2248 sp->pathmtu = params->spp_pathmtu;
2246 } 2249 }
@@ -2877,15 +2880,10 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
2877 val -= sizeof(struct sctphdr) + 2880 val -= sizeof(struct sctphdr) +
2878 sizeof(struct sctp_data_chunk); 2881 sizeof(struct sctp_data_chunk);
2879 } 2882 }
2880 2883 asoc->user_frag = val;
2881 asoc->frag_point = val; 2884 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
2882 } else { 2885 } else {
2883 sp->user_frag = val; 2886 sp->user_frag = val;
2884
2885 /* Update the frag_point of the existing associations. */
2886 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
2887 asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
2888 }
2889 } 2887 }
2890 2888
2891 return 0; 2889 return 0;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 63eabbc71298..ab7151da120f 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -51,6 +51,7 @@ static int timer_max = 86400000; /* ms in one day */
51static int int_max = INT_MAX; 51static int int_max = INT_MAX;
52static int sack_timer_min = 1; 52static int sack_timer_min = 1;
53static int sack_timer_max = 500; 53static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
54 55
55extern int sysctl_sctp_mem[3]; 56extern int sysctl_sctp_mem[3];
56extern int sysctl_sctp_rmem[3]; 57extern int sysctl_sctp_rmem[3];
@@ -272,6 +273,17 @@ static ctl_table sctp_table[] = {
272 .proc_handler = proc_dointvec, 273 .proc_handler = proc_dointvec,
273 .strategy = sysctl_intvec 274 .strategy = sysctl_intvec
274 }, 275 },
276 {
277 .ctl_name = CTL_UNNUMBERED,
278 .procname = "addr_scope_policy",
279 .data = &sctp_scope_policy,
280 .maxlen = sizeof(int),
281 .mode = 0644,
282 .proc_handler = &proc_dointvec_minmax,
283 .strategy = &sysctl_intvec,
284 .extra1 = &zero,
285 .extra2 = &addr_scope_max,
286 },
275 { .ctl_name = 0 } 287 { .ctl_name = 0 }
276}; 288};
277 289
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e5dde45c79d3..c256e4839316 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -503,6 +503,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
503 transport->ssthresh = max(transport->cwnd/2, 503 transport->ssthresh = max(transport->cwnd/2,
504 4*transport->asoc->pathmtu); 504 4*transport->asoc->pathmtu);
505 transport->cwnd = transport->asoc->pathmtu; 505 transport->cwnd = transport->asoc->pathmtu;
506
507 /* T3-rtx also clears fast recovery on the transport */
508 transport->fast_recovery = 0;
506 break; 509 break;
507 510
508 case SCTP_LOWER_CWND_FAST_RTX: 511 case SCTP_LOWER_CWND_FAST_RTX:
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5bc2f45bddf0..ebfcf9b89909 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -28,7 +28,6 @@
28#include <linux/kallsyms.h> 28#include <linux/kallsyms.h>
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/smp_lock.h>
32#include <linux/utsname.h> 31#include <linux/utsname.h>
33#include <linux/workqueue.h> 32#include <linux/workqueue.h>
34#include <linux/in6.h> 33#include <linux/in6.h>
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 1102ce1251f7..8f459abe97cf 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -16,7 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/mempool.h> 17#include <linux/mempool.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/spinlock.h> 19#include <linux/spinlock.h>
21#include <linux/mutex.h> 20#include <linux/mutex.h>
22 21
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 6f33d33cc064..27d44332f017 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/smp_lock.h>
8#include <linux/errno.h> 9#include <linux/errno.h>
9#include <linux/freezer.h> 10#include <linux/freezer.h>
10#include <linux/kthread.h> 11#include <linux/kthread.h>
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a7a36779b9b3..327011fcc407 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -119,7 +119,7 @@ int tipc_register_media(u32 media_type,
119 warn("Media <%s> rejected, no broadcast address\n", name); 119 warn("Media <%s> rejected, no broadcast address\n", name);
120 goto exit; 120 goto exit;
121 } 121 }
122 if ((bearer_priority < TIPC_MIN_LINK_PRI) && 122 if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
123 (bearer_priority > TIPC_MAX_LINK_PRI)) { 123 (bearer_priority > TIPC_MAX_LINK_PRI)) {
124 warn("Media <%s> rejected, illegal priority (%u)\n", name, 124 warn("Media <%s> rejected, illegal priority (%u)\n", name,
125 bearer_priority); 125 bearer_priority);
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 466e2d22d256..258daa80ad92 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -48,6 +48,7 @@
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/module.h> /* support for loadable modules */ 49#include <linux/module.h> /* support for loadable modules */
50#include <linux/slab.h> /* kmalloc(), kfree() */ 50#include <linux/slab.h> /* kmalloc(), kfree() */
51#include <linux/smp_lock.h>
51#include <linux/mm.h> 52#include <linux/mm.h>
52#include <linux/string.h> /* inline mem*, str* functions */ 53#include <linux/string.h> /* inline mem*, str* functions */
53 54
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index c6031d5b135f..abf7ca3f9ff9 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,6 +1,15 @@
1config CFG80211 1config CFG80211
2 tristate "Improved wireless configuration API" 2 tristate "cfg80211 - wireless configuration API"
3 depends on RFKILL || !RFKILL 3 depends on RFKILL || !RFKILL
4 ---help---
5 cfg80211 is the Linux wireless LAN (802.11) configuration API.
6 Enable this if you have a wireless device.
7
8 For more information refer to documentation on the wireless wiki:
9
10 http://wireless.kernel.org/en/developers/Documentation/cfg80211
11
12 When built as a module it will be called cfg80211.
4 13
5config NL80211_TESTMODE 14config NL80211_TESTMODE
6 bool "nl80211 testmode command" 15 bool "nl80211 testmode command"
@@ -17,12 +26,33 @@ config NL80211_TESTMODE
17 26
18 Say N. 27 Say N.
19 28
29config CFG80211_DEVELOPER_WARNINGS
30 bool "enable developer warnings"
31 depends on CFG80211
32 default n
33 help
34 This option enables some additional warnings that help
35 cfg80211 developers and driver developers, but that can
36 trigger due to races with userspace.
37
38 For example, when a driver reports that it was disconnected
39 from the AP, but the user disconnects manually at the same
40 time, the warning might trigger spuriously due to races.
41
42 Say Y only if you are developing cfg80211 or a driver based
43 on it (or mac80211).
44
45
20config CFG80211_REG_DEBUG 46config CFG80211_REG_DEBUG
21 bool "cfg80211 regulatory debugging" 47 bool "cfg80211 regulatory debugging"
22 depends on CFG80211 48 depends on CFG80211
23 default n 49 default n
24 ---help--- 50 ---help---
25 You can enable this if you want to debug regulatory changes. 51 You can enable this if you want to debug regulatory changes.
52 For more information on cfg80211 regulatory refer to the wireless
53 wiki:
54
55 http://wireless.kernel.org/en/developers/Regulatory
26 56
27 If unsure, say N. 57 If unsure, say N.
28 58
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index d74cc77fa57a..3ecaa9179977 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o 5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o sme.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
9cfg80211-y += mlme.o ibss.o sme.o chan.o
9cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 10cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
10cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o 11cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o
11 12
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
new file mode 100644
index 000000000000..a46ac6c9b365
--- /dev/null
+++ b/net/wireless/chan.c
@@ -0,0 +1,89 @@
1/*
2 * This file contains helper code to handle channel
3 * settings and keeping track of what is possible at
4 * any point in time.
5 *
6 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
7 */
8
9#include <net/cfg80211.h>
10#include "core.h"
11
12struct ieee80211_channel *
13rdev_fixed_channel(struct cfg80211_registered_device *rdev,
14 struct wireless_dev *for_wdev)
15{
16 struct wireless_dev *wdev;
17 struct ieee80211_channel *result = NULL;
18
19 WARN_ON(!mutex_is_locked(&rdev->devlist_mtx));
20
21 list_for_each_entry(wdev, &rdev->netdev_list, list) {
22 if (wdev == for_wdev)
23 continue;
24
25 /*
26 * Lock manually to tell lockdep about allowed
27 * nesting here if for_wdev->mtx is held already.
28 * This is ok as it's all under the rdev devlist
29 * mutex and as such can only be done once at any
30 * given time.
31 */
32 mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING);
33 if (wdev->current_bss)
34 result = wdev->current_bss->pub.channel;
35 wdev_unlock(wdev);
36
37 if (result)
38 break;
39 }
40
41 return result;
42}
43
44int rdev_set_freq(struct cfg80211_registered_device *rdev,
45 struct wireless_dev *for_wdev,
46 int freq, enum nl80211_channel_type channel_type)
47{
48 struct ieee80211_channel *chan;
49 struct ieee80211_sta_ht_cap *ht_cap;
50 int result;
51
52 if (rdev_fixed_channel(rdev, for_wdev))
53 return -EBUSY;
54
55 if (!rdev->ops->set_channel)
56 return -EOPNOTSUPP;
57
58 chan = ieee80211_get_channel(&rdev->wiphy, freq);
59
60 /* Primary channel not allowed */
61 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
62 return -EINVAL;
63
64 if (channel_type == NL80211_CHAN_HT40MINUS &&
65 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
66 return -EINVAL;
67 else if (channel_type == NL80211_CHAN_HT40PLUS &&
68 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
69 return -EINVAL;
70
71 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
72
73 if (channel_type != NL80211_CHAN_NO_HT) {
74 if (!ht_cap->ht_supported)
75 return -EINVAL;
76
77 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
78 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
79 return -EINVAL;
80 }
81
82 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
83 if (result)
84 return result;
85
86 rdev->channel = chan;
87
88 return 0;
89}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 1e189306560d..45b2be3274db 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -12,6 +12,7 @@
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
16#include <net/genetlink.h> 17#include <net/genetlink.h>
17#include <net/cfg80211.h> 18#include <net/cfg80211.h>
@@ -32,6 +33,7 @@ MODULE_DESCRIPTION("wireless configuration support");
32 * only read the list, and that can happen quite 33 * only read the list, and that can happen quite
33 * often because we need to do it for each command */ 34 * often because we need to do it for each command */
34LIST_HEAD(cfg80211_rdev_list); 35LIST_HEAD(cfg80211_rdev_list);
36int cfg80211_rdev_list_generation;
35 37
36/* 38/*
37 * This is used to protect the cfg80211_rdev_list 39 * This is used to protect the cfg80211_rdev_list
@@ -292,68 +294,17 @@ static void cfg80211_rfkill_sync_work(struct work_struct *work)
292 cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill)); 294 cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill));
293} 295}
294 296
295static void cfg80211_process_events(struct wireless_dev *wdev)
296{
297 struct cfg80211_event *ev;
298 unsigned long flags;
299
300 spin_lock_irqsave(&wdev->event_lock, flags);
301 while (!list_empty(&wdev->event_list)) {
302 ev = list_first_entry(&wdev->event_list,
303 struct cfg80211_event, list);
304 list_del(&ev->list);
305 spin_unlock_irqrestore(&wdev->event_lock, flags);
306
307 wdev_lock(wdev);
308 switch (ev->type) {
309 case EVENT_CONNECT_RESULT:
310 __cfg80211_connect_result(
311 wdev->netdev, ev->cr.bssid,
312 ev->cr.req_ie, ev->cr.req_ie_len,
313 ev->cr.resp_ie, ev->cr.resp_ie_len,
314 ev->cr.status,
315 ev->cr.status == WLAN_STATUS_SUCCESS,
316 NULL);
317 break;
318 case EVENT_ROAMED:
319 __cfg80211_roamed(wdev, ev->rm.bssid,
320 ev->rm.req_ie, ev->rm.req_ie_len,
321 ev->rm.resp_ie, ev->rm.resp_ie_len);
322 break;
323 case EVENT_DISCONNECTED:
324 __cfg80211_disconnected(wdev->netdev,
325 ev->dc.ie, ev->dc.ie_len,
326 ev->dc.reason, true);
327 break;
328 case EVENT_IBSS_JOINED:
329 __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
330 break;
331 }
332 wdev_unlock(wdev);
333
334 kfree(ev);
335
336 spin_lock_irqsave(&wdev->event_lock, flags);
337 }
338 spin_unlock_irqrestore(&wdev->event_lock, flags);
339}
340
341static void cfg80211_event_work(struct work_struct *work) 297static void cfg80211_event_work(struct work_struct *work)
342{ 298{
343 struct cfg80211_registered_device *rdev; 299 struct cfg80211_registered_device *rdev;
344 struct wireless_dev *wdev;
345 300
346 rdev = container_of(work, struct cfg80211_registered_device, 301 rdev = container_of(work, struct cfg80211_registered_device,
347 event_work); 302 event_work);
348 303
349 rtnl_lock(); 304 rtnl_lock();
350 cfg80211_lock_rdev(rdev); 305 cfg80211_lock_rdev(rdev);
351 mutex_lock(&rdev->devlist_mtx);
352 306
353 list_for_each_entry(wdev, &rdev->netdev_list, list) 307 cfg80211_process_rdev_events(rdev);
354 cfg80211_process_events(wdev);
355
356 mutex_unlock(&rdev->devlist_mtx);
357 cfg80211_unlock_rdev(rdev); 308 cfg80211_unlock_rdev(rdev);
358 rtnl_unlock(); 309 rtnl_unlock();
359} 310}
@@ -411,6 +362,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
411 rdev->wiphy.dev.class = &ieee80211_class; 362 rdev->wiphy.dev.class = &ieee80211_class;
412 rdev->wiphy.dev.platform_data = rdev; 363 rdev->wiphy.dev.platform_data = rdev;
413 364
365 rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE;
366
414 wiphy_net_set(&rdev->wiphy, &init_net); 367 wiphy_net_set(&rdev->wiphy, &init_net);
415 368
416 rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; 369 rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
@@ -427,6 +380,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
427 INIT_WORK(&rdev->conn_work, cfg80211_conn_work); 380 INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
428 INIT_WORK(&rdev->event_work, cfg80211_event_work); 381 INIT_WORK(&rdev->event_work, cfg80211_event_work);
429 382
383 init_waitqueue_head(&rdev->dev_wait);
384
430 /* 385 /*
431 * Initialize wiphy parameters to IEEE 802.11 MIB default values. 386 * Initialize wiphy parameters to IEEE 802.11 MIB default values.
432 * Fragmentation and RTS threshold are disabled by default with the 387 * Fragmentation and RTS threshold are disabled by default with the
@@ -511,6 +466,7 @@ int wiphy_register(struct wiphy *wiphy)
511 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 466 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
512 467
513 list_add(&rdev->list, &cfg80211_rdev_list); 468 list_add(&rdev->list, &cfg80211_rdev_list);
469 cfg80211_rdev_list_generation++;
514 470
515 mutex_unlock(&cfg80211_mutex); 471 mutex_unlock(&cfg80211_mutex);
516 472
@@ -570,7 +526,23 @@ void wiphy_unregister(struct wiphy *wiphy)
570 /* protect the device list */ 526 /* protect the device list */
571 mutex_lock(&cfg80211_mutex); 527 mutex_lock(&cfg80211_mutex);
572 528
529 wait_event(rdev->dev_wait, ({
530 int __count;
531 mutex_lock(&rdev->devlist_mtx);
532 __count = rdev->opencount;
533 mutex_unlock(&rdev->devlist_mtx);
534 __count == 0;}));
535
536 mutex_lock(&rdev->devlist_mtx);
573 BUG_ON(!list_empty(&rdev->netdev_list)); 537 BUG_ON(!list_empty(&rdev->netdev_list));
538 mutex_unlock(&rdev->devlist_mtx);
539
540 /*
541 * First remove the hardware from everywhere, this makes
542 * it impossible to find from userspace.
543 */
544 cfg80211_debugfs_rdev_del(rdev);
545 list_del(&rdev->list);
574 546
575 /* 547 /*
576 * Try to grab rdev->mtx. If a command is still in progress, 548 * Try to grab rdev->mtx. If a command is still in progress,
@@ -578,29 +550,26 @@ void wiphy_unregister(struct wiphy *wiphy)
578 * down the device already. We wait for this command to complete 550 * down the device already. We wait for this command to complete
579 * before unlinking the item from the list. 551 * before unlinking the item from the list.
580 * Note: as codified by the BUG_ON above we cannot get here if 552 * Note: as codified by the BUG_ON above we cannot get here if
581 * a virtual interface is still associated. Hence, we can only 553 * a virtual interface is still present. Hence, we can only get
582 * get to lock contention here if userspace issues a command 554 * to lock contention here if userspace issues a command that
583 * that identified the hardware by wiphy index. 555 * identified the hardware by wiphy index.
584 */ 556 */
585 mutex_lock(&rdev->mtx); 557 cfg80211_lock_rdev(rdev);
586 /* unlock again before freeing */ 558 /* nothing */
587 mutex_unlock(&rdev->mtx); 559 cfg80211_unlock_rdev(rdev);
588
589 cfg80211_debugfs_rdev_del(rdev);
590 560
591 /* If this device got a regulatory hint tell core its 561 /* If this device got a regulatory hint tell core its
592 * free to listen now to a new shiny device regulatory hint */ 562 * free to listen now to a new shiny device regulatory hint */
593 reg_device_remove(wiphy); 563 reg_device_remove(wiphy);
594 564
595 list_del(&rdev->list); 565 cfg80211_rdev_list_generation++;
596 device_del(&rdev->wiphy.dev); 566 device_del(&rdev->wiphy.dev);
597 debugfs_remove(rdev->wiphy.debugfsdir); 567 debugfs_remove(rdev->wiphy.debugfsdir);
598 568
599 mutex_unlock(&cfg80211_mutex); 569 mutex_unlock(&cfg80211_mutex);
600 570
571 flush_work(&rdev->scan_done_wk);
601 cancel_work_sync(&rdev->conn_work); 572 cancel_work_sync(&rdev->conn_work);
602 cancel_work_sync(&rdev->scan_done_wk);
603 kfree(rdev->scan_req);
604 flush_work(&rdev->event_work); 573 flush_work(&rdev->event_work);
605} 574}
606EXPORT_SYMBOL(wiphy_unregister); 575EXPORT_SYMBOL(wiphy_unregister);
@@ -631,6 +600,31 @@ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
631} 600}
632EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); 601EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
633 602
603static void wdev_cleanup_work(struct work_struct *work)
604{
605 struct wireless_dev *wdev;
606 struct cfg80211_registered_device *rdev;
607
608 wdev = container_of(work, struct wireless_dev, cleanup_work);
609 rdev = wiphy_to_dev(wdev->wiphy);
610
611 cfg80211_lock_rdev(rdev);
612
613 if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) {
614 rdev->scan_req->aborted = true;
615 ___cfg80211_scan_done(rdev, true);
616 }
617
618 cfg80211_unlock_rdev(rdev);
619
620 mutex_lock(&rdev->devlist_mtx);
621 rdev->opencount--;
622 mutex_unlock(&rdev->devlist_mtx);
623 wake_up(&rdev->dev_wait);
624
625 dev_put(wdev->netdev);
626}
627
634static int cfg80211_netdev_notifier_call(struct notifier_block * nb, 628static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
635 unsigned long state, 629 unsigned long state,
636 void *ndev) 630 void *ndev)
@@ -648,11 +642,18 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
648 642
649 switch (state) { 643 switch (state) {
650 case NETDEV_REGISTER: 644 case NETDEV_REGISTER:
645 /*
646 * NB: cannot take rdev->mtx here because this may be
647 * called within code protected by it when interfaces
648 * are added with nl80211.
649 */
651 mutex_init(&wdev->mtx); 650 mutex_init(&wdev->mtx);
651 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
652 INIT_LIST_HEAD(&wdev->event_list); 652 INIT_LIST_HEAD(&wdev->event_list);
653 spin_lock_init(&wdev->event_lock); 653 spin_lock_init(&wdev->event_lock);
654 mutex_lock(&rdev->devlist_mtx); 654 mutex_lock(&rdev->devlist_mtx);
655 list_add(&wdev->list, &rdev->netdev_list); 655 list_add(&wdev->list, &rdev->netdev_list);
656 rdev->devlist_generation++;
656 /* can only change netns with wiphy */ 657 /* can only change netns with wiphy */
657 dev->features |= NETIF_F_NETNS_LOCAL; 658 dev->features |= NETIF_F_NETNS_LOCAL;
658 659
@@ -670,7 +671,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
670 wdev->wext.default_key = -1; 671 wdev->wext.default_key = -1;
671 wdev->wext.default_mgmt_key = -1; 672 wdev->wext.default_mgmt_key = -1;
672 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 673 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
673 wdev->wext.ps = CONFIG_CFG80211_DEFAULT_PS_VALUE; 674 wdev->wext.ps = wdev->wiphy->ps_default;
674 wdev->wext.ps_timeout = 100; 675 wdev->wext.ps_timeout = 100;
675 if (rdev->ops->set_power_mgmt) 676 if (rdev->ops->set_power_mgmt)
676 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 677 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
@@ -703,9 +704,26 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
703 break; 704 break;
704 } 705 }
705 break; 706 break;
707 case NETDEV_DOWN:
708 dev_hold(dev);
709 schedule_work(&wdev->cleanup_work);
710 break;
706 case NETDEV_UP: 711 case NETDEV_UP:
712 /*
713 * If we have a really quick DOWN/UP succession we may
714 * have this work still pending ... cancel it and see
715 * if it was pending, in which case we need to account
716 * for some of the work it would have done.
717 */
718 if (cancel_work_sync(&wdev->cleanup_work)) {
719 mutex_lock(&rdev->devlist_mtx);
720 rdev->opencount--;
721 mutex_unlock(&rdev->devlist_mtx);
722 dev_put(dev);
723 }
707#ifdef CONFIG_WIRELESS_EXT 724#ifdef CONFIG_WIRELESS_EXT
708 cfg80211_lock_rdev(rdev); 725 cfg80211_lock_rdev(rdev);
726 mutex_lock(&rdev->devlist_mtx);
709 wdev_lock(wdev); 727 wdev_lock(wdev);
710 switch (wdev->iftype) { 728 switch (wdev->iftype) {
711 case NL80211_IFTYPE_ADHOC: 729 case NL80211_IFTYPE_ADHOC:
@@ -718,10 +736,17 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
718 break; 736 break;
719 } 737 }
720 wdev_unlock(wdev); 738 wdev_unlock(wdev);
739 rdev->opencount++;
740 mutex_unlock(&rdev->devlist_mtx);
721 cfg80211_unlock_rdev(rdev); 741 cfg80211_unlock_rdev(rdev);
722#endif 742#endif
723 break; 743 break;
724 case NETDEV_UNREGISTER: 744 case NETDEV_UNREGISTER:
745 /*
746 * NB: cannot take rdev->mtx here because this may be
747 * called within code protected by it when interfaces
748 * are removed with nl80211.
749 */
725 mutex_lock(&rdev->devlist_mtx); 750 mutex_lock(&rdev->devlist_mtx);
726 /* 751 /*
727 * It is possible to get NETDEV_UNREGISTER 752 * It is possible to get NETDEV_UNREGISTER
@@ -733,7 +758,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
733 if (!list_empty(&wdev->list)) { 758 if (!list_empty(&wdev->list)) {
734 sysfs_remove_link(&dev->dev.kobj, "phy80211"); 759 sysfs_remove_link(&dev->dev.kobj, "phy80211");
735 list_del_init(&wdev->list); 760 list_del_init(&wdev->list);
736 mutex_destroy(&wdev->mtx); 761 rdev->devlist_generation++;
737#ifdef CONFIG_WIRELESS_EXT 762#ifdef CONFIG_WIRELESS_EXT
738 kfree(wdev->wext.keys); 763 kfree(wdev->wext.keys);
739#endif 764#endif
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 325c17e6198c..2a33d8bc886b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -49,6 +49,9 @@ struct cfg80211_registered_device {
49 /* associate netdev list */ 49 /* associate netdev list */
50 struct mutex devlist_mtx; 50 struct mutex devlist_mtx;
51 struct list_head netdev_list; 51 struct list_head netdev_list;
52 int devlist_generation;
53 int opencount; /* also protected by devlist_mtx */
54 wait_queue_head_t dev_wait;
52 55
53 /* BSSes/scanning */ 56 /* BSSes/scanning */
54 spinlock_t bss_lock; 57 spinlock_t bss_lock;
@@ -101,6 +104,7 @@ bool wiphy_idx_valid(int wiphy_idx)
101 104
102extern struct mutex cfg80211_mutex; 105extern struct mutex cfg80211_mutex;
103extern struct list_head cfg80211_rdev_list; 106extern struct list_head cfg80211_rdev_list;
107extern int cfg80211_rdev_list_generation;
104 108
105#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex)) 109#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
106 110
@@ -335,7 +339,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
335int __cfg80211_connect(struct cfg80211_registered_device *rdev, 339int __cfg80211_connect(struct cfg80211_registered_device *rdev,
336 struct net_device *dev, 340 struct net_device *dev,
337 struct cfg80211_connect_params *connect, 341 struct cfg80211_connect_params *connect,
338 struct cfg80211_cached_keys *connkeys); 342 struct cfg80211_cached_keys *connkeys,
343 const u8 *prev_bssid);
339int cfg80211_connect(struct cfg80211_registered_device *rdev, 344int cfg80211_connect(struct cfg80211_registered_device *rdev,
340 struct net_device *dev, 345 struct net_device *dev,
341 struct cfg80211_connect_params *connect, 346 struct cfg80211_connect_params *connect,
@@ -353,6 +358,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
353 struct wireless_dev *wdev); 358 struct wireless_dev *wdev);
354 359
355void cfg80211_conn_work(struct work_struct *work); 360void cfg80211_conn_work(struct work_struct *work);
361bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
356 362
357/* internal helpers */ 363/* internal helpers */
358int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, 364int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
@@ -364,6 +370,29 @@ void cfg80211_sme_scan_done(struct net_device *dev);
364void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); 370void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
365void cfg80211_sme_disassoc(struct net_device *dev, int idx); 371void cfg80211_sme_disassoc(struct net_device *dev, int idx);
366void __cfg80211_scan_done(struct work_struct *wk); 372void __cfg80211_scan_done(struct work_struct *wk);
373void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
367void cfg80211_upload_connect_keys(struct wireless_dev *wdev); 374void cfg80211_upload_connect_keys(struct wireless_dev *wdev);
375int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
376 struct net_device *dev, enum nl80211_iftype ntype,
377 u32 *flags, struct vif_params *params);
378void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
379
380struct ieee80211_channel *
381rdev_fixed_channel(struct cfg80211_registered_device *rdev,
382 struct wireless_dev *for_wdev);
383int rdev_set_freq(struct cfg80211_registered_device *rdev,
384 struct wireless_dev *for_wdev,
385 int freq, enum nl80211_channel_type channel_type);
386
387#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
388#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
389#else
390/*
391 * Trick to enable using it as a condition,
392 * and also not give a warning when it's
393 * not used that way.
394 */
395#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; })
396#endif
368 397
369#endif /* __NET_WIRELESS_CORE_H */ 398#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 4d7a084b35e2..c88338911979 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -22,7 +22,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
22 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) 22 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
23 return; 23 return;
24 24
25 if (WARN_ON(!wdev->ssid_len)) 25 if (!wdev->ssid_len)
26 return; 26 return;
27 27
28 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 28 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
@@ -58,6 +58,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
58 struct cfg80211_event *ev; 58 struct cfg80211_event *ev;
59 unsigned long flags; 59 unsigned long flags;
60 60
61 CFG80211_DEV_WARN_ON(!wdev->ssid_len);
62
61 ev = kzalloc(sizeof(*ev), gfp); 63 ev = kzalloc(sizeof(*ev), gfp);
62 if (!ev) 64 if (!ev)
63 return; 65 return;
@@ -78,10 +80,15 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
78 struct cfg80211_cached_keys *connkeys) 80 struct cfg80211_cached_keys *connkeys)
79{ 81{
80 struct wireless_dev *wdev = dev->ieee80211_ptr; 82 struct wireless_dev *wdev = dev->ieee80211_ptr;
83 struct ieee80211_channel *chan;
81 int err; 84 int err;
82 85
83 ASSERT_WDEV_LOCK(wdev); 86 ASSERT_WDEV_LOCK(wdev);
84 87
88 chan = rdev_fixed_channel(rdev, wdev);
89 if (chan && chan != params->channel)
90 return -EBUSY;
91
85 if (wdev->ssid_len) 92 if (wdev->ssid_len)
86 return -EALREADY; 93 return -EALREADY;
87 94
@@ -112,9 +119,11 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
112 struct wireless_dev *wdev = dev->ieee80211_ptr; 119 struct wireless_dev *wdev = dev->ieee80211_ptr;
113 int err; 120 int err;
114 121
122 mutex_lock(&rdev->devlist_mtx);
115 wdev_lock(wdev); 123 wdev_lock(wdev);
116 err = __cfg80211_join_ibss(rdev, dev, params, connkeys); 124 err = __cfg80211_join_ibss(rdev, dev, params, connkeys);
117 wdev_unlock(wdev); 125 wdev_unlock(wdev);
126 mutex_unlock(&rdev->devlist_mtx);
118 127
119 return err; 128 return err;
120} 129}
@@ -264,27 +273,32 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
264 273
265int cfg80211_ibss_wext_siwfreq(struct net_device *dev, 274int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
266 struct iw_request_info *info, 275 struct iw_request_info *info,
267 struct iw_freq *freq, char *extra) 276 struct iw_freq *wextfreq, char *extra)
268{ 277{
269 struct wireless_dev *wdev = dev->ieee80211_ptr; 278 struct wireless_dev *wdev = dev->ieee80211_ptr;
270 struct ieee80211_channel *chan; 279 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
271 int err; 280 struct ieee80211_channel *chan = NULL;
281 int err, freq;
272 282
273 /* call only for ibss! */ 283 /* call only for ibss! */
274 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) 284 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
275 return -EINVAL; 285 return -EINVAL;
276 286
277 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) 287 if (!rdev->ops->join_ibss)
278 return -EOPNOTSUPP; 288 return -EOPNOTSUPP;
279 289
280 chan = cfg80211_wext_freq(wdev->wiphy, freq); 290 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
281 if (chan && IS_ERR(chan)) 291 if (freq < 0)
282 return PTR_ERR(chan); 292 return freq;
283 293
284 if (chan && 294 if (freq) {
285 (chan->flags & IEEE80211_CHAN_NO_IBSS || 295 chan = ieee80211_get_channel(wdev->wiphy, freq);
286 chan->flags & IEEE80211_CHAN_DISABLED)) 296 if (!chan)
287 return -EINVAL; 297 return -EINVAL;
298 if (chan->flags & IEEE80211_CHAN_NO_IBSS ||
299 chan->flags & IEEE80211_CHAN_DISABLED)
300 return -EINVAL;
301 }
288 302
289 if (wdev->wext.ibss.channel == chan) 303 if (wdev->wext.ibss.channel == chan)
290 return 0; 304 return 0;
@@ -292,8 +306,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
292 wdev_lock(wdev); 306 wdev_lock(wdev);
293 err = 0; 307 err = 0;
294 if (wdev->ssid_len) 308 if (wdev->ssid_len)
295 err = __cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), 309 err = __cfg80211_leave_ibss(rdev, dev, true);
296 dev, true);
297 wdev_unlock(wdev); 310 wdev_unlock(wdev);
298 311
299 if (err) 312 if (err)
@@ -307,9 +320,11 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
307 wdev->wext.ibss.channel_fixed = false; 320 wdev->wext.ibss.channel_fixed = false;
308 } 321 }
309 322
323 mutex_lock(&rdev->devlist_mtx);
310 wdev_lock(wdev); 324 wdev_lock(wdev);
311 err = cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); 325 err = cfg80211_ibss_wext_join(rdev, wdev);
312 wdev_unlock(wdev); 326 wdev_unlock(wdev);
327 mutex_unlock(&rdev->devlist_mtx);
313 328
314 return err; 329 return err;
315} 330}
@@ -347,6 +362,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
347 struct iw_point *data, char *ssid) 362 struct iw_point *data, char *ssid)
348{ 363{
349 struct wireless_dev *wdev = dev->ieee80211_ptr; 364 struct wireless_dev *wdev = dev->ieee80211_ptr;
365 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
350 size_t len = data->length; 366 size_t len = data->length;
351 int err; 367 int err;
352 368
@@ -354,14 +370,13 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
354 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) 370 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
355 return -EINVAL; 371 return -EINVAL;
356 372
357 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) 373 if (!rdev->ops->join_ibss)
358 return -EOPNOTSUPP; 374 return -EOPNOTSUPP;
359 375
360 wdev_lock(wdev); 376 wdev_lock(wdev);
361 err = 0; 377 err = 0;
362 if (wdev->ssid_len) 378 if (wdev->ssid_len)
363 err = __cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), 379 err = __cfg80211_leave_ibss(rdev, dev, true);
364 dev, true);
365 wdev_unlock(wdev); 380 wdev_unlock(wdev);
366 381
367 if (err) 382 if (err)
@@ -375,9 +390,11 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
375 memcpy(wdev->wext.ibss.ssid, ssid, len); 390 memcpy(wdev->wext.ibss.ssid, ssid, len);
376 wdev->wext.ibss.ssid_len = len; 391 wdev->wext.ibss.ssid_len = len;
377 392
393 mutex_lock(&rdev->devlist_mtx);
378 wdev_lock(wdev); 394 wdev_lock(wdev);
379 err = cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); 395 err = cfg80211_ibss_wext_join(rdev, wdev);
380 wdev_unlock(wdev); 396 wdev_unlock(wdev);
397 mutex_unlock(&rdev->devlist_mtx);
381 398
382 return err; 399 return err;
383} 400}
@@ -414,6 +431,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
414 struct sockaddr *ap_addr, char *extra) 431 struct sockaddr *ap_addr, char *extra)
415{ 432{
416 struct wireless_dev *wdev = dev->ieee80211_ptr; 433 struct wireless_dev *wdev = dev->ieee80211_ptr;
434 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
417 u8 *bssid = ap_addr->sa_data; 435 u8 *bssid = ap_addr->sa_data;
418 int err; 436 int err;
419 437
@@ -421,7 +439,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
421 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) 439 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
422 return -EINVAL; 440 return -EINVAL;
423 441
424 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) 442 if (!rdev->ops->join_ibss)
425 return -EOPNOTSUPP; 443 return -EOPNOTSUPP;
426 444
427 if (ap_addr->sa_family != ARPHRD_ETHER) 445 if (ap_addr->sa_family != ARPHRD_ETHER)
@@ -443,8 +461,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
443 wdev_lock(wdev); 461 wdev_lock(wdev);
444 err = 0; 462 err = 0;
445 if (wdev->ssid_len) 463 if (wdev->ssid_len)
446 err = __cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), 464 err = __cfg80211_leave_ibss(rdev, dev, true);
447 dev, true);
448 wdev_unlock(wdev); 465 wdev_unlock(wdev);
449 466
450 if (err) 467 if (err)
@@ -456,9 +473,11 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
456 } else 473 } else
457 wdev->wext.ibss.bssid = NULL; 474 wdev->wext.ibss.bssid = NULL;
458 475
476 mutex_lock(&rdev->devlist_mtx);
459 wdev_lock(wdev); 477 wdev_lock(wdev);
460 err = cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); 478 err = cfg80211_ibss_wext_join(rdev, wdev);
461 wdev_unlock(wdev); 479 wdev_unlock(wdev);
480 mutex_unlock(&rdev->devlist_mtx);
462 481
463 return err; 482 return err;
464} 483}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 525e8e247b30..79d2eec54cec 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -67,6 +67,16 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
67 67
68 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 68 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
69 69
70 /*
71 * This is a bit of a hack, we don't notify userspace of
72 * a (re-)association reply if we tried to send a reassoc
73 * and got a reject -- we only try again with an assoc
74 * frame instead of reassoc.
75 */
76 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
77 cfg80211_sme_failed_reassoc(wdev))
78 goto out;
79
70 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); 80 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
71 81
72 if (status_code == WLAN_STATUS_SUCCESS) { 82 if (status_code == WLAN_STATUS_SUCCESS) {
@@ -86,6 +96,15 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
86 WARN_ON(!bss); 96 WARN_ON(!bss);
87 } 97 }
88 98
99 if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
100 /*
101 * This is for the userspace SME, the CONNECTING
102 * state will be changed to CONNECTED by
103 * __cfg80211_connect_result() below.
104 */
105 wdev->sme_state = CFG80211_SME_CONNECTING;
106 }
107
89 /* this consumes one bss reference (unless bss is NULL) */ 108 /* this consumes one bss reference (unless bss is NULL) */
90 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, 109 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
91 status_code, 110 status_code,
@@ -97,6 +116,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
97 cfg80211_put_bss(&bss->pub); 116 cfg80211_put_bss(&bss->pub);
98 } 117 }
99 118
119 out:
100 wdev_unlock(wdev); 120 wdev_unlock(wdev);
101} 121}
102EXPORT_SYMBOL(cfg80211_send_rx_assoc); 122EXPORT_SYMBOL(cfg80211_send_rx_assoc);
@@ -149,7 +169,7 @@ static void __cfg80211_send_deauth(struct net_device *dev,
149 169
150 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 170 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
151 171
152 from_ap = memcmp(mgmt->da, dev->dev_addr, ETH_ALEN) == 0; 172 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
153 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 173 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
154 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { 174 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
155 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, 175 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -198,7 +218,7 @@ static void __cfg80211_send_disassoc(struct net_device *dev,
198 return; 218 return;
199 219
200 if (wdev->current_bss && 220 if (wdev->current_bss &&
201 memcmp(wdev->current_bss, bssid, ETH_ALEN) == 0) { 221 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
202 for (i = 0; i < MAX_AUTH_BSSES; i++) { 222 for (i = 0; i < MAX_AUTH_BSSES; i++) {
203 if (wdev->authtry_bsses[i] || wdev->auth_bsses[i]) 223 if (wdev->authtry_bsses[i] || wdev->auth_bsses[i])
204 continue; 224 continue;
@@ -215,7 +235,7 @@ static void __cfg80211_send_disassoc(struct net_device *dev,
215 235
216 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 236 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
217 237
218 from_ap = memcmp(mgmt->da, dev->dev_addr, ETH_ALEN) == 0; 238 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
219 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 239 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
220} 240}
221 241
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 0cd548267d4a..eddab097435c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -408,6 +408,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
408 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); 408 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
409 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 409 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
410 410
411 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
412 cfg80211_rdev_list_generation);
413
411 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 414 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
412 dev->wiphy.retry_short); 415 dev->wiphy.retry_short);
413 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, 416 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
@@ -701,15 +704,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
701 704
702 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 705 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
703 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 706 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
704 struct ieee80211_channel *chan;
705 struct ieee80211_sta_ht_cap *ht_cap;
706 u32 freq; 707 u32 freq;
707 708
708 if (!rdev->ops->set_channel) {
709 result = -EOPNOTSUPP;
710 goto bad_res;
711 }
712
713 result = -EINVAL; 709 result = -EINVAL;
714 710
715 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 711 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -723,42 +719,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
723 } 719 }
724 720
725 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 721 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
726 chan = ieee80211_get_channel(&rdev->wiphy, freq);
727 722
728 /* Primary channel not allowed */ 723 mutex_lock(&rdev->devlist_mtx);
729 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 724 result = rdev_set_freq(rdev, NULL, freq, channel_type);
730 goto bad_res; 725 mutex_unlock(&rdev->devlist_mtx);
731
732 if (channel_type == NL80211_CHAN_HT40MINUS &&
733 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
734 goto bad_res;
735 else if (channel_type == NL80211_CHAN_HT40PLUS &&
736 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
737 goto bad_res;
738
739 /*
740 * At this point we know if that if HT40 was requested
741 * we are allowed to use it and the extension channel
742 * exists.
743 */
744
745 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
746
747 /* no HT capabilities or intolerant */
748 if (channel_type != NL80211_CHAN_NO_HT) {
749 if (!ht_cap->ht_supported)
750 goto bad_res;
751 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
752 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
753 goto bad_res;
754 }
755
756 result = rdev->ops->set_channel(&rdev->wiphy, chan,
757 channel_type);
758 if (result) 726 if (result)
759 goto bad_res; 727 goto bad_res;
760
761 rdev->channel = chan;
762 } 728 }
763 729
764 changed = 0; 730 changed = 0;
@@ -862,6 +828,11 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
862 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 828 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
863 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 829 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
864 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); 830 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
831
832 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
833 rdev->devlist_generation ^
834 (cfg80211_rdev_list_generation << 2));
835
865 return genlmsg_end(msg, hdr); 836 return genlmsg_end(msg, hdr);
866 837
867 nla_put_failure: 838 nla_put_failure:
@@ -875,12 +846,12 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
875 int if_idx = 0; 846 int if_idx = 0;
876 int wp_start = cb->args[0]; 847 int wp_start = cb->args[0];
877 int if_start = cb->args[1]; 848 int if_start = cb->args[1];
878 struct cfg80211_registered_device *dev; 849 struct cfg80211_registered_device *rdev;
879 struct wireless_dev *wdev; 850 struct wireless_dev *wdev;
880 851
881 mutex_lock(&cfg80211_mutex); 852 mutex_lock(&cfg80211_mutex);
882 list_for_each_entry(dev, &cfg80211_rdev_list, list) { 853 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
883 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) 854 if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
884 continue; 855 continue;
885 if (wp_idx < wp_start) { 856 if (wp_idx < wp_start) {
886 wp_idx++; 857 wp_idx++;
@@ -888,21 +859,21 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
888 } 859 }
889 if_idx = 0; 860 if_idx = 0;
890 861
891 mutex_lock(&dev->devlist_mtx); 862 mutex_lock(&rdev->devlist_mtx);
892 list_for_each_entry(wdev, &dev->netdev_list, list) { 863 list_for_each_entry(wdev, &rdev->netdev_list, list) {
893 if (if_idx < if_start) { 864 if (if_idx < if_start) {
894 if_idx++; 865 if_idx++;
895 continue; 866 continue;
896 } 867 }
897 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, 868 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
898 cb->nlh->nlmsg_seq, NLM_F_MULTI, 869 cb->nlh->nlmsg_seq, NLM_F_MULTI,
899 dev, wdev->netdev) < 0) { 870 rdev, wdev->netdev) < 0) {
900 mutex_unlock(&dev->devlist_mtx); 871 mutex_unlock(&rdev->devlist_mtx);
901 goto out; 872 goto out;
902 } 873 }
903 if_idx++; 874 if_idx++;
904 } 875 }
905 mutex_unlock(&dev->devlist_mtx); 876 mutex_unlock(&rdev->devlist_mtx);
906 877
907 wp_idx++; 878 wp_idx++;
908 } 879 }
@@ -1006,12 +977,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
1006 } 977 }
1007 } 978 }
1008 979
1009 if (!rdev->ops->change_virtual_intf ||
1010 !(rdev->wiphy.interface_modes & (1 << ntype))) {
1011 err = -EOPNOTSUPP;
1012 goto unlock;
1013 }
1014
1015 if (info->attrs[NL80211_ATTR_MESH_ID]) { 980 if (info->attrs[NL80211_ATTR_MESH_ID]) {
1016 if (ntype != NL80211_IFTYPE_MESH_POINT) { 981 if (ntype != NL80211_IFTYPE_MESH_POINT) {
1017 err = -EINVAL; 982 err = -EINVAL;
@@ -1037,18 +1002,10 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
1037 } 1002 }
1038 1003
1039 if (change) 1004 if (change)
1040 err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, 1005 err = cfg80211_change_iface(rdev, dev, ntype, flags, &params);
1041 ntype, flags, &params);
1042 else 1006 else
1043 err = 0; 1007 err = 0;
1044 1008
1045 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
1046
1047 if (!err && (ntype != otype)) {
1048 if (otype == NL80211_IFTYPE_ADHOC)
1049 cfg80211_clear_ibss(dev, false);
1050 }
1051
1052 unlock: 1009 unlock:
1053 dev_put(dev); 1010 dev_put(dev);
1054 cfg80211_unlock_rdev(rdev); 1011 cfg80211_unlock_rdev(rdev);
@@ -1653,6 +1610,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1653 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 1610 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
1654 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 1611 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
1655 1612
1613 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
1614
1656 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); 1615 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
1657 if (!sinfoattr) 1616 if (!sinfoattr)
1658 goto nla_put_failure; 1617 goto nla_put_failure;
@@ -2138,6 +2097,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
2138 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); 2097 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
2139 NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); 2098 NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
2140 2099
2100 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
2101
2141 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); 2102 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
2142 if (!pinfoattr) 2103 if (!pinfoattr)
2143 goto nla_put_failure; 2104 goto nla_put_failure;
@@ -2220,7 +2181,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
2220 2181
2221 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { 2182 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
2222 err = -EOPNOTSUPP; 2183 err = -EOPNOTSUPP;
2223 goto out; 2184 goto out_err;
2224 } 2185 }
2225 2186
2226 while (1) { 2187 while (1) {
@@ -3027,10 +2988,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3027 goto out; 2988 goto out;
3028 } 2989 }
3029 2990
3030 request->channels = (void *)((char *)request + sizeof(*request));
3031 request->n_channels = n_channels; 2991 request->n_channels = n_channels;
3032 if (n_ssids) 2992 if (n_ssids)
3033 request->ssids = (void *)(request->channels + n_channels); 2993 request->ssids = (void *)&request->channels[n_channels];
3034 request->n_ssids = n_ssids; 2994 request->n_ssids = n_ssids;
3035 if (ie_len) { 2995 if (ie_len) {
3036 if (request->ssids) 2996 if (request->ssids)
@@ -3127,8 +3087,7 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3127 if (!hdr) 3087 if (!hdr)
3128 return -1; 3088 return -1;
3129 3089
3130 NLA_PUT_U32(msg, NL80211_ATTR_SCAN_GENERATION, 3090 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
3131 rdev->bss_generation);
3132 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); 3091 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
3133 3092
3134 bss = nla_nest_start(msg, NL80211_ATTR_BSS); 3093 bss = nla_nest_start(msg, NL80211_ATTR_BSS);
@@ -3453,7 +3412,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3453 struct cfg80211_registered_device *rdev; 3412 struct cfg80211_registered_device *rdev;
3454 struct net_device *dev; 3413 struct net_device *dev;
3455 struct cfg80211_crypto_settings crypto; 3414 struct cfg80211_crypto_settings crypto;
3456 struct ieee80211_channel *chan; 3415 struct ieee80211_channel *chan, *fixedchan;
3457 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 3416 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
3458 int err, ssid_len, ie_len = 0; 3417 int err, ssid_len, ie_len = 0;
3459 bool use_mfp = false; 3418 bool use_mfp = false;
@@ -3496,6 +3455,15 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3496 goto out; 3455 goto out;
3497 } 3456 }
3498 3457
3458 mutex_lock(&rdev->devlist_mtx);
3459 fixedchan = rdev_fixed_channel(rdev, NULL);
3460 if (fixedchan && chan != fixedchan) {
3461 err = -EBUSY;
3462 mutex_unlock(&rdev->devlist_mtx);
3463 goto out;
3464 }
3465 mutex_unlock(&rdev->devlist_mtx);
3466
3499 ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); 3467 ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
3500 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); 3468 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
3501 3469
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b3ac0aace0e5..f256dfffbf46 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1018,7 +1018,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
1018 map_regdom_flags(reg_rule->flags) | bw_flags; 1018 map_regdom_flags(reg_rule->flags) | bw_flags;
1019 chan->max_antenna_gain = chan->orig_mag = 1019 chan->max_antenna_gain = chan->orig_mag =
1020 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1020 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1021 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1022 chan->max_power = chan->orig_mpwr = 1021 chan->max_power = chan->orig_mpwr =
1023 (int) MBM_TO_DBM(power_rule->max_eirp); 1022 (int) MBM_TO_DBM(power_rule->max_eirp);
1024 return; 1023 return;
@@ -1027,7 +1026,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
1027 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 1026 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
1028 chan->max_antenna_gain = min(chan->orig_mag, 1027 chan->max_antenna_gain = min(chan->orig_mag,
1029 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 1028 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
1030 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1031 if (chan->orig_mpwr) 1029 if (chan->orig_mpwr)
1032 chan->max_power = min(chan->orig_mpwr, 1030 chan->max_power = min(chan->orig_mpwr,
1033 (int) MBM_TO_DBM(power_rule->max_eirp)); 1031 (int) MBM_TO_DBM(power_rule->max_eirp));
@@ -1095,17 +1093,18 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1095 1093
1096 chan->beacon_found = true; 1094 chan->beacon_found = true;
1097 1095
1096 if (wiphy->disable_beacon_hints)
1097 return;
1098
1098 chan_before.center_freq = chan->center_freq; 1099 chan_before.center_freq = chan->center_freq;
1099 chan_before.flags = chan->flags; 1100 chan_before.flags = chan->flags;
1100 1101
1101 if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) && 1102 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
1102 !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1103 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; 1103 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
1104 channel_changed = true; 1104 channel_changed = true;
1105 } 1105 }
1106 1106
1107 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 1107 if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
1108 !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) {
1109 chan->flags &= ~IEEE80211_CHAN_NO_IBSS; 1108 chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
1110 channel_changed = true; 1109 channel_changed = true;
1111 } 1110 }
@@ -1328,7 +1327,6 @@ static void handle_channel_custom(struct wiphy *wiphy,
1328 1327
1329 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; 1328 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1330 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1329 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1331 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1332 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 1330 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
1333} 1331}
1334 1332
@@ -1426,7 +1424,7 @@ static int ignore_request(struct wiphy *wiphy,
1426 if (last_wiphy != wiphy) { 1424 if (last_wiphy != wiphy) {
1427 /* 1425 /*
1428 * Two cards with two APs claiming different 1426 * Two cards with two APs claiming different
1429 * different Country IE alpha2s. We could 1427 * Country IE alpha2s. We could
1430 * intersect them, but that seems unlikely 1428 * intersect them, but that seems unlikely
1431 * to be correct. Reject second one for now. 1429 * to be correct. Reject second one for now.
1432 */ 1430 */
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 662a9dad76d5..3362c7c069b2 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -30,7 +30,8 @@ int set_regdom(const struct ieee80211_regdomain *rd);
30 * non-radar 5 GHz channels. 30 * non-radar 5 GHz channels.
31 * 31 *
32 * Drivers do not need to call this, cfg80211 will do it for after a scan 32 * Drivers do not need to call this, cfg80211 will do it for after a scan
33 * on a newly found BSS. 33 * on a newly found BSS. If you cannot make use of this feature you can
34 * set the wiphy->disable_beacon_hints to true.
34 */ 35 */
35int regulatory_hint_found_beacon(struct wiphy *wiphy, 36int regulatory_hint_found_beacon(struct wiphy *wiphy,
36 struct ieee80211_channel *beacon_chan, 37 struct ieee80211_channel *beacon_chan,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 67714d7ed5b4..4c210c2debc6 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -18,21 +18,21 @@
18 18
19#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) 19#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ)
20 20
21void __cfg80211_scan_done(struct work_struct *wk) 21void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
22{ 22{
23 struct cfg80211_registered_device *rdev;
24 struct cfg80211_scan_request *request; 23 struct cfg80211_scan_request *request;
25 struct net_device *dev; 24 struct net_device *dev;
26#ifdef CONFIG_WIRELESS_EXT 25#ifdef CONFIG_WIRELESS_EXT
27 union iwreq_data wrqu; 26 union iwreq_data wrqu;
28#endif 27#endif
29 28
30 rdev = container_of(wk, struct cfg80211_registered_device, 29 ASSERT_RDEV_LOCK(rdev);
31 scan_done_wk);
32 30
33 mutex_lock(&rdev->mtx);
34 request = rdev->scan_req; 31 request = rdev->scan_req;
35 32
33 if (!request)
34 return;
35
36 dev = request->dev; 36 dev = request->dev;
37 37
38 /* 38 /*
@@ -43,9 +43,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
43 cfg80211_sme_scan_done(dev); 43 cfg80211_sme_scan_done(dev);
44 44
45 if (request->aborted) 45 if (request->aborted)
46 nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev); 46 nl80211_send_scan_aborted(rdev, dev);
47 else 47 else
48 nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); 48 nl80211_send_scan_done(rdev, dev);
49 49
50#ifdef CONFIG_WIRELESS_EXT 50#ifdef CONFIG_WIRELESS_EXT
51 if (!request->aborted) { 51 if (!request->aborted) {
@@ -57,9 +57,30 @@ void __cfg80211_scan_done(struct work_struct *wk)
57 57
58 dev_put(dev); 58 dev_put(dev);
59 59
60 rdev->scan_req = NULL;
61
62 /*
63 * OK. If this is invoked with "leak" then we can't
64 * free this ... but we've cleaned it up anyway. The
65 * driver failed to call the scan_done callback, so
66 * all bets are off, it might still be trying to use
67 * the scan request or not ... if it accesses the dev
68 * in there (it shouldn't anyway) then it may crash.
69 */
70 if (!leak)
71 kfree(request);
72}
73
74void __cfg80211_scan_done(struct work_struct *wk)
75{
76 struct cfg80211_registered_device *rdev;
77
78 rdev = container_of(wk, struct cfg80211_registered_device,
79 scan_done_wk);
80
81 cfg80211_lock_rdev(rdev);
82 ___cfg80211_scan_done(rdev, false);
60 cfg80211_unlock_rdev(rdev); 83 cfg80211_unlock_rdev(rdev);
61 wiphy_to_dev(request->wiphy)->scan_req = NULL;
62 kfree(request);
63} 84}
64 85
65void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) 86void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -120,7 +141,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
120 dev->bss_generation++; 141 dev->bss_generation++;
121} 142}
122 143
123static u8 *find_ie(u8 num, u8 *ies, size_t len) 144static u8 *find_ie(u8 num, u8 *ies, int len)
124{ 145{
125 while (len > 2 && ies[0] != num) { 146 while (len > 2 && ies[0] != num) {
126 len -= ies[1] + 2; 147 len -= ies[1] + 2;
@@ -141,7 +162,7 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
141 162
142 if (!ie1 && !ie2) 163 if (!ie1 && !ie2)
143 return 0; 164 return 0;
144 if (!ie1) 165 if (!ie1 || !ie2)
145 return -1; 166 return -1;
146 167
147 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); 168 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1]));
@@ -194,6 +215,8 @@ static bool is_mesh(struct cfg80211_bss *a,
194 ie = find_ie(WLAN_EID_MESH_CONFIG, 215 ie = find_ie(WLAN_EID_MESH_CONFIG,
195 a->information_elements, 216 a->information_elements,
196 a->len_information_elements); 217 a->len_information_elements);
218 if (!ie)
219 return false;
197 if (ie[1] != IEEE80211_MESH_CONFIG_LEN) 220 if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
198 return false; 221 return false;
199 222
@@ -560,6 +583,7 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
560 spin_lock_bh(&dev->bss_lock); 583 spin_lock_bh(&dev->bss_lock);
561 584
562 list_del(&bss->list); 585 list_del(&bss->list);
586 dev->bss_generation++;
563 rb_erase(&bss->rbn, &dev->bss_tree); 587 rb_erase(&bss->rbn, &dev->bss_tree);
564 588
565 spin_unlock_bh(&dev->bss_lock); 589 spin_unlock_bh(&dev->bss_lock);
@@ -583,6 +607,9 @@ int cfg80211_wext_siwscan(struct net_device *dev,
583 if (!netif_running(dev)) 607 if (!netif_running(dev))
584 return -ENETDOWN; 608 return -ENETDOWN;
585 609
610 if (wrqu->data.length == sizeof(struct iw_scan_req))
611 wreq = (struct iw_scan_req *)extra;
612
586 rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); 613 rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex);
587 614
588 if (IS_ERR(rdev)) 615 if (IS_ERR(rdev))
@@ -595,9 +622,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
595 622
596 wiphy = &rdev->wiphy; 623 wiphy = &rdev->wiphy;
597 624
598 for (band = 0; band < IEEE80211_NUM_BANDS; band++) 625 /* Determine number of channels, needed to allocate creq */
599 if (wiphy->bands[band]) 626 if (wreq && wreq->num_channels)
600 n_channels += wiphy->bands[band]->n_channels; 627 n_channels = wreq->num_channels;
628 else {
629 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
630 if (wiphy->bands[band])
631 n_channels += wiphy->bands[band]->n_channels;
632 }
601 633
602 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + 634 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
603 n_channels * sizeof(void *), 635 n_channels * sizeof(void *),
@@ -609,27 +641,46 @@ int cfg80211_wext_siwscan(struct net_device *dev,
609 641
610 creq->wiphy = wiphy; 642 creq->wiphy = wiphy;
611 creq->dev = dev; 643 creq->dev = dev;
612 creq->ssids = (void *)(creq + 1); 644 /* SSIDs come after channels */
613 creq->channels = (void *)(creq->ssids + 1); 645 creq->ssids = (void *)&creq->channels[n_channels];
614 creq->n_channels = n_channels; 646 creq->n_channels = n_channels;
615 creq->n_ssids = 1; 647 creq->n_ssids = 1;
616 648
617 /* all channels */ 649 /* translate "Scan on frequencies" request */
618 i = 0; 650 i = 0;
619 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 651 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
620 int j; 652 int j;
621 if (!wiphy->bands[band]) 653 if (!wiphy->bands[band])
622 continue; 654 continue;
623 for (j = 0; j < wiphy->bands[band]->n_channels; j++) { 655 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
656
657 /* If we have a wireless request structure and the
658 * wireless request specifies frequencies, then search
659 * for the matching hardware channel.
660 */
661 if (wreq && wreq->num_channels) {
662 int k;
663 int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
664 for (k = 0; k < wreq->num_channels; k++) {
665 int wext_freq = wreq->channel_list[k].m / 100000;
666 if (wext_freq == wiphy_freq)
667 goto wext_freq_found;
668 }
669 goto wext_freq_not_found;
670 }
671
672 wext_freq_found:
624 creq->channels[i] = &wiphy->bands[band]->channels[j]; 673 creq->channels[i] = &wiphy->bands[band]->channels[j];
625 i++; 674 i++;
675 wext_freq_not_found: ;
626 } 676 }
627 } 677 }
628 678
629 /* translate scan request */ 679 /* Set real number of channels specified in creq->channels[] */
630 if (wrqu->data.length == sizeof(struct iw_scan_req)) { 680 creq->n_channels = i;
631 wreq = (struct iw_scan_req *)extra;
632 681
682 /* translate "Scan for SSID" request */
683 if (wreq) {
633 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 684 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
634 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) 685 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
635 return -EINVAL; 686 return -EINVAL;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8a7dcbf90602..68307883ec87 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -27,10 +27,10 @@ struct cfg80211_conn {
27 CFG80211_CONN_ASSOCIATE_NEXT, 27 CFG80211_CONN_ASSOCIATE_NEXT,
28 CFG80211_CONN_ASSOCIATING, 28 CFG80211_CONN_ASSOCIATING,
29 } state; 29 } state;
30 u8 bssid[ETH_ALEN]; 30 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
31 u8 *ie; 31 u8 *ie;
32 size_t ie_len; 32 size_t ie_len;
33 bool auto_auth; 33 bool auto_auth, prev_bssid_valid;
34}; 34};
35 35
36 36
@@ -65,7 +65,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
65 if (!request) 65 if (!request)
66 return -ENOMEM; 66 return -ENOMEM;
67 67
68 request->channels = (void *)((char *)request + sizeof(*request));
69 if (wdev->conn->params.channel) 68 if (wdev->conn->params.channel)
70 request->channels[0] = wdev->conn->params.channel; 69 request->channels[0] = wdev->conn->params.channel;
71 else { 70 else {
@@ -82,7 +81,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
82 } 81 }
83 } 82 }
84 request->n_channels = n_channels; 83 request->n_channels = n_channels;
85 request->ssids = (void *)(request->channels + n_channels); 84 request->ssids = (void *)&request->channels[n_channels];
86 request->n_ssids = 1; 85 request->n_ssids = 1;
87 86
88 memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, 87 memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
@@ -110,6 +109,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
110{ 109{
111 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 110 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
112 struct cfg80211_connect_params *params; 111 struct cfg80211_connect_params *params;
112 const u8 *prev_bssid = NULL;
113 int err; 113 int err;
114 114
115 ASSERT_WDEV_LOCK(wdev); 115 ASSERT_WDEV_LOCK(wdev);
@@ -135,15 +135,11 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
135 case CFG80211_CONN_ASSOCIATE_NEXT: 135 case CFG80211_CONN_ASSOCIATE_NEXT:
136 BUG_ON(!rdev->ops->assoc); 136 BUG_ON(!rdev->ops->assoc);
137 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 137 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
138 /* 138 if (wdev->conn->prev_bssid_valid)
139 * We could, later, implement roaming here and then actually 139 prev_bssid = wdev->conn->prev_bssid;
140 * set prev_bssid to non-NULL. But then we need to be aware
141 * that some APs don't like that -- so we'd need to retry
142 * the association.
143 */
144 err = __cfg80211_mlme_assoc(rdev, wdev->netdev, 140 err = __cfg80211_mlme_assoc(rdev, wdev->netdev,
145 params->channel, params->bssid, 141 params->channel, params->bssid,
146 NULL, 142 prev_bssid,
147 params->ssid, params->ssid_len, 143 params->ssid, params->ssid_len,
148 params->ie, params->ie_len, 144 params->ie, params->ie_len,
149 false, &params->crypto); 145 false, &params->crypto);
@@ -256,9 +252,11 @@ void cfg80211_sme_scan_done(struct net_device *dev)
256{ 252{
257 struct wireless_dev *wdev = dev->ieee80211_ptr; 253 struct wireless_dev *wdev = dev->ieee80211_ptr;
258 254
255 mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
259 wdev_lock(wdev); 256 wdev_lock(wdev);
260 __cfg80211_sme_scan_done(dev); 257 __cfg80211_sme_scan_done(dev);
261 wdev_unlock(wdev); 258 wdev_unlock(wdev);
259 mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
262} 260}
263 261
264void cfg80211_sme_rx_auth(struct net_device *dev, 262void cfg80211_sme_rx_auth(struct net_device *dev,
@@ -314,6 +312,28 @@ void cfg80211_sme_rx_auth(struct net_device *dev,
314 } 312 }
315} 313}
316 314
315bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev)
316{
317 struct wiphy *wiphy = wdev->wiphy;
318 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
319
320 if (WARN_ON(!wdev->conn))
321 return false;
322
323 if (!wdev->conn->prev_bssid_valid)
324 return false;
325
326 /*
327 * Some stupid APs don't accept reassoc, so we
328 * need to fall back to trying regular assoc.
329 */
330 wdev->conn->prev_bssid_valid = false;
331 wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
332 schedule_work(&rdev->conn_work);
333
334 return true;
335}
336
317void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 337void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
318 const u8 *req_ie, size_t req_ie_len, 338 const u8 *req_ie, size_t req_ie_len,
319 const u8 *resp_ie, size_t resp_ie_len, 339 const u8 *resp_ie, size_t resp_ie_len,
@@ -331,15 +351,13 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
331 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) 351 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
332 return; 352 return;
333 353
334 if (wdev->sme_state == CFG80211_SME_CONNECTED) 354 if (wdev->sme_state != CFG80211_SME_CONNECTING)
335 nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), dev, 355 return;
356
357 nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
336 bssid, req_ie, req_ie_len, 358 bssid, req_ie, req_ie_len,
337 resp_ie, resp_ie_len, GFP_KERNEL); 359 resp_ie, resp_ie_len,
338 else 360 status, GFP_KERNEL);
339 nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
340 bssid, req_ie, req_ie_len,
341 resp_ie, resp_ie_len,
342 status, GFP_KERNEL);
343 361
344#ifdef CONFIG_WIRELESS_EXT 362#ifdef CONFIG_WIRELESS_EXT
345 if (wextev) { 363 if (wextev) {
@@ -357,8 +375,11 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
357 375
358 memset(&wrqu, 0, sizeof(wrqu)); 376 memset(&wrqu, 0, sizeof(wrqu));
359 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 377 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
360 if (bssid && status == WLAN_STATUS_SUCCESS) 378 if (bssid && status == WLAN_STATUS_SUCCESS) {
361 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); 379 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
380 memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
381 wdev->wext.prev_bssid_valid = true;
382 }
362 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 383 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
363 } 384 }
364#endif 385#endif
@@ -369,18 +390,13 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
369 wdev->current_bss = NULL; 390 wdev->current_bss = NULL;
370 } 391 }
371 392
372 if (status == WLAN_STATUS_SUCCESS &&
373 wdev->sme_state == CFG80211_SME_IDLE)
374 goto success;
375
376 if (wdev->sme_state != CFG80211_SME_CONNECTING)
377 return;
378
379 if (wdev->conn) 393 if (wdev->conn)
380 wdev->conn->state = CFG80211_CONN_IDLE; 394 wdev->conn->state = CFG80211_CONN_IDLE;
381 395
382 if (status != WLAN_STATUS_SUCCESS) { 396 if (status != WLAN_STATUS_SUCCESS) {
383 wdev->sme_state = CFG80211_SME_IDLE; 397 wdev->sme_state = CFG80211_SME_IDLE;
398 if (wdev->conn)
399 kfree(wdev->conn->ie);
384 kfree(wdev->conn); 400 kfree(wdev->conn);
385 wdev->conn = NULL; 401 wdev->conn = NULL;
386 kfree(wdev->connect_keys); 402 kfree(wdev->connect_keys);
@@ -389,7 +405,6 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
389 return; 405 return;
390 } 406 }
391 407
392 success:
393 if (!bss) 408 if (!bss)
394 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 409 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
395 wdev->ssid, wdev->ssid_len, 410 wdev->ssid, wdev->ssid_len,
@@ -430,12 +445,15 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
430 struct cfg80211_event *ev; 445 struct cfg80211_event *ev;
431 unsigned long flags; 446 unsigned long flags;
432 447
448 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
449
433 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); 450 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
434 if (!ev) 451 if (!ev)
435 return; 452 return;
436 453
437 ev->type = EVENT_CONNECT_RESULT; 454 ev->type = EVENT_CONNECT_RESULT;
438 memcpy(ev->cr.bssid, bssid, ETH_ALEN); 455 if (bssid)
456 memcpy(ev->cr.bssid, bssid, ETH_ALEN);
439 ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); 457 ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
440 ev->cr.req_ie_len = req_ie_len; 458 ev->cr.req_ie_len = req_ie_len;
441 memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); 459 memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
@@ -465,7 +483,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
465 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) 483 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
466 return; 484 return;
467 485
468 if (WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED)) 486 if (wdev->sme_state != CFG80211_SME_CONNECTED)
469 return; 487 return;
470 488
471 /* internal error -- how did we get to CONNECTED w/o BSS? */ 489 /* internal error -- how did we get to CONNECTED w/o BSS? */
@@ -509,6 +527,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
509 memset(&wrqu, 0, sizeof(wrqu)); 527 memset(&wrqu, 0, sizeof(wrqu));
510 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 528 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
511 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); 529 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
530 memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
531 wdev->wext.prev_bssid_valid = true;
512 wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); 532 wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
513#endif 533#endif
514} 534}
@@ -522,6 +542,8 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
522 struct cfg80211_event *ev; 542 struct cfg80211_event *ev;
523 unsigned long flags; 543 unsigned long flags;
524 544
545 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
546
525 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); 547 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
526 if (!ev) 548 if (!ev)
527 return; 549 return;
@@ -557,7 +579,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
557 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) 579 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
558 return; 580 return;
559 581
560 if (WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED)) 582 if (wdev->sme_state != CFG80211_SME_CONNECTED)
561 return; 583 return;
562 584
563 if (wdev->current_bss) { 585 if (wdev->current_bss) {
@@ -570,10 +592,30 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
570 wdev->ssid_len = 0; 592 wdev->ssid_len = 0;
571 593
572 if (wdev->conn) { 594 if (wdev->conn) {
595 const u8 *bssid;
596 int ret;
597
573 kfree(wdev->conn->ie); 598 kfree(wdev->conn->ie);
574 wdev->conn->ie = NULL; 599 wdev->conn->ie = NULL;
575 kfree(wdev->conn); 600 kfree(wdev->conn);
576 wdev->conn = NULL; 601 wdev->conn = NULL;
602
603 /*
604 * If this disconnect was due to a disassoc, we
605 * we might still have an auth BSS around. For
606 * the userspace SME that's currently expected,
607 * but for the kernel SME (nl80211 CONNECT or
608 * wireless extensions) we want to clear up all
609 * state.
610 */
611 for (i = 0; i < MAX_AUTH_BSSES; i++) {
612 if (!wdev->auth_bsses[i])
613 continue;
614 bssid = wdev->auth_bsses[i]->pub.bssid;
615 ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
616 WLAN_REASON_DEAUTH_LEAVING);
617 WARN(ret, "deauth failed: %d\n", ret);
618 }
577 } 619 }
578 620
579 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); 621 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@@ -601,6 +643,8 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
601 struct cfg80211_event *ev; 643 struct cfg80211_event *ev;
602 unsigned long flags; 644 unsigned long flags;
603 645
646 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
647
604 ev = kzalloc(sizeof(*ev) + ie_len, gfp); 648 ev = kzalloc(sizeof(*ev) + ie_len, gfp);
605 if (!ev) 649 if (!ev)
606 return; 650 return;
@@ -621,9 +665,11 @@ EXPORT_SYMBOL(cfg80211_disconnected);
621int __cfg80211_connect(struct cfg80211_registered_device *rdev, 665int __cfg80211_connect(struct cfg80211_registered_device *rdev,
622 struct net_device *dev, 666 struct net_device *dev,
623 struct cfg80211_connect_params *connect, 667 struct cfg80211_connect_params *connect,
624 struct cfg80211_cached_keys *connkeys) 668 struct cfg80211_cached_keys *connkeys,
669 const u8 *prev_bssid)
625{ 670{
626 struct wireless_dev *wdev = dev->ieee80211_ptr; 671 struct wireless_dev *wdev = dev->ieee80211_ptr;
672 struct ieee80211_channel *chan;
627 int err; 673 int err;
628 674
629 ASSERT_WDEV_LOCK(wdev); 675 ASSERT_WDEV_LOCK(wdev);
@@ -631,6 +677,10 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
631 if (wdev->sme_state != CFG80211_SME_IDLE) 677 if (wdev->sme_state != CFG80211_SME_IDLE)
632 return -EALREADY; 678 return -EALREADY;
633 679
680 chan = rdev_fixed_channel(rdev, wdev);
681 if (chan && chan != connect->channel)
682 return -EBUSY;
683
634 if (WARN_ON(wdev->connect_keys)) { 684 if (WARN_ON(wdev->connect_keys)) {
635 kfree(wdev->connect_keys); 685 kfree(wdev->connect_keys);
636 wdev->connect_keys = NULL; 686 wdev->connect_keys = NULL;
@@ -638,14 +688,28 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
638 688
639 if (connkeys && connkeys->def >= 0) { 689 if (connkeys && connkeys->def >= 0) {
640 int idx; 690 int idx;
691 u32 cipher;
641 692
642 idx = connkeys->def; 693 idx = connkeys->def;
694 cipher = connkeys->params[idx].cipher;
643 /* If given a WEP key we may need it for shared key auth */ 695 /* If given a WEP key we may need it for shared key auth */
644 if (connkeys->params[idx].cipher == WLAN_CIPHER_SUITE_WEP40 || 696 if (cipher == WLAN_CIPHER_SUITE_WEP40 ||
645 connkeys->params[idx].cipher == WLAN_CIPHER_SUITE_WEP104) { 697 cipher == WLAN_CIPHER_SUITE_WEP104) {
646 connect->key_idx = idx; 698 connect->key_idx = idx;
647 connect->key = connkeys->params[idx].key; 699 connect->key = connkeys->params[idx].key;
648 connect->key_len = connkeys->params[idx].key_len; 700 connect->key_len = connkeys->params[idx].key_len;
701
702 /*
703 * If ciphers are not set (e.g. when going through
704 * iwconfig), we have to set them appropriately here.
705 */
706 if (connect->crypto.cipher_group == 0)
707 connect->crypto.cipher_group = cipher;
708
709 if (connect->crypto.n_ciphers_pairwise == 0) {
710 connect->crypto.n_ciphers_pairwise = 1;
711 connect->crypto.ciphers_pairwise[0] = cipher;
712 }
649 } 713 }
650 } 714 }
651 715
@@ -701,6 +765,11 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
701 wdev->sme_state = CFG80211_SME_CONNECTING; 765 wdev->sme_state = CFG80211_SME_CONNECTING;
702 wdev->connect_keys = connkeys; 766 wdev->connect_keys = connkeys;
703 767
768 if (prev_bssid) {
769 memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
770 wdev->conn->prev_bssid_valid = true;
771 }
772
704 /* we're good if we have both BSSID and channel */ 773 /* we're good if we have both BSSID and channel */
705 if (wdev->conn->params.bssid && wdev->conn->params.channel) { 774 if (wdev->conn->params.bssid && wdev->conn->params.channel) {
706 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; 775 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
@@ -719,6 +788,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
719 } 788 }
720 } 789 }
721 if (err) { 790 if (err) {
791 kfree(wdev->conn->ie);
722 kfree(wdev->conn); 792 kfree(wdev->conn);
723 wdev->conn = NULL; 793 wdev->conn = NULL;
724 wdev->sme_state = CFG80211_SME_IDLE; 794 wdev->sme_state = CFG80211_SME_IDLE;
@@ -751,9 +821,11 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
751{ 821{
752 int err; 822 int err;
753 823
824 mutex_lock(&rdev->devlist_mtx);
754 wdev_lock(dev->ieee80211_ptr); 825 wdev_lock(dev->ieee80211_ptr);
755 err = __cfg80211_connect(rdev, dev, connect, connkeys); 826 err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
756 wdev_unlock(dev->ieee80211_ptr); 827 wdev_unlock(dev->ieee80211_ptr);
828 mutex_unlock(&rdev->devlist_mtx);
757 829
758 return err; 830 return err;
759} 831}
@@ -786,6 +858,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
786 (wdev->conn->state == CFG80211_CONN_SCANNING || 858 (wdev->conn->state == CFG80211_CONN_SCANNING ||
787 wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { 859 wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) {
788 wdev->sme_state = CFG80211_SME_IDLE; 860 wdev->sme_state = CFG80211_SME_IDLE;
861 kfree(wdev->conn->ie);
789 kfree(wdev->conn); 862 kfree(wdev->conn);
790 wdev->conn = NULL; 863 wdev->conn = NULL;
791 wdev->ssid_len = 0; 864 wdev->ssid_len = 0;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ba387d85dcfd..3fc2df86278f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -274,11 +274,11 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
274 switch (ae) { 274 switch (ae) {
275 case 0: 275 case 0:
276 return 6; 276 return 6;
277 case 1: 277 case MESH_FLAGS_AE_A4:
278 return 12; 278 return 12;
279 case 2: 279 case MESH_FLAGS_AE_A5_A6:
280 return 18; 280 return 18;
281 case 3: 281 case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6):
282 return 24; 282 return 24;
283 default: 283 default:
284 return 6; 284 return 6;
@@ -333,10 +333,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
333 } 333 }
334 break; 334 break;
335 case cpu_to_le16(IEEE80211_FCTL_FROMDS): 335 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
336 if (iftype != NL80211_IFTYPE_STATION || 336 if ((iftype != NL80211_IFTYPE_STATION &&
337 iftype != NL80211_IFTYPE_MESH_POINT) ||
337 (is_multicast_ether_addr(dst) && 338 (is_multicast_ether_addr(dst) &&
338 !compare_ether_addr(src, addr))) 339 !compare_ether_addr(src, addr)))
339 return -1; 340 return -1;
341 if (iftype == NL80211_IFTYPE_MESH_POINT) {
342 struct ieee80211s_hdr *meshdr =
343 (struct ieee80211s_hdr *) (skb->data + hdrlen);
344 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
345 if (meshdr->flags & MESH_FLAGS_AE_A4)
346 memcpy(src, meshdr->eaddr1, ETH_ALEN);
347 }
340 break; 348 break;
341 case cpu_to_le16(0): 349 case cpu_to_le16(0):
342 if (iftype != NL80211_IFTYPE_ADHOC) 350 if (iftype != NL80211_IFTYPE_ADHOC)
@@ -566,3 +574,111 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
566 kfree(wdev->connect_keys); 574 kfree(wdev->connect_keys);
567 wdev->connect_keys = NULL; 575 wdev->connect_keys = NULL;
568} 576}
577
578static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
579{
580 struct cfg80211_event *ev;
581 unsigned long flags;
582 const u8 *bssid = NULL;
583
584 spin_lock_irqsave(&wdev->event_lock, flags);
585 while (!list_empty(&wdev->event_list)) {
586 ev = list_first_entry(&wdev->event_list,
587 struct cfg80211_event, list);
588 list_del(&ev->list);
589 spin_unlock_irqrestore(&wdev->event_lock, flags);
590
591 wdev_lock(wdev);
592 switch (ev->type) {
593 case EVENT_CONNECT_RESULT:
594 if (!is_zero_ether_addr(ev->cr.bssid))
595 bssid = ev->cr.bssid;
596 __cfg80211_connect_result(
597 wdev->netdev, bssid,
598 ev->cr.req_ie, ev->cr.req_ie_len,
599 ev->cr.resp_ie, ev->cr.resp_ie_len,
600 ev->cr.status,
601 ev->cr.status == WLAN_STATUS_SUCCESS,
602 NULL);
603 break;
604 case EVENT_ROAMED:
605 __cfg80211_roamed(wdev, ev->rm.bssid,
606 ev->rm.req_ie, ev->rm.req_ie_len,
607 ev->rm.resp_ie, ev->rm.resp_ie_len);
608 break;
609 case EVENT_DISCONNECTED:
610 __cfg80211_disconnected(wdev->netdev,
611 ev->dc.ie, ev->dc.ie_len,
612 ev->dc.reason, true);
613 break;
614 case EVENT_IBSS_JOINED:
615 __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
616 break;
617 }
618 wdev_unlock(wdev);
619
620 kfree(ev);
621
622 spin_lock_irqsave(&wdev->event_lock, flags);
623 }
624 spin_unlock_irqrestore(&wdev->event_lock, flags);
625}
626
627void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
628{
629 struct wireless_dev *wdev;
630
631 ASSERT_RTNL();
632 ASSERT_RDEV_LOCK(rdev);
633
634 mutex_lock(&rdev->devlist_mtx);
635
636 list_for_each_entry(wdev, &rdev->netdev_list, list)
637 cfg80211_process_wdev_events(wdev);
638
639 mutex_unlock(&rdev->devlist_mtx);
640}
641
642int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
643 struct net_device *dev, enum nl80211_iftype ntype,
644 u32 *flags, struct vif_params *params)
645{
646 int err;
647 enum nl80211_iftype otype = dev->ieee80211_ptr->iftype;
648
649 ASSERT_RDEV_LOCK(rdev);
650
651 /* don't support changing VLANs, you just re-create them */
652 if (otype == NL80211_IFTYPE_AP_VLAN)
653 return -EOPNOTSUPP;
654
655 if (!rdev->ops->change_virtual_intf ||
656 !(rdev->wiphy.interface_modes & (1 << ntype)))
657 return -EOPNOTSUPP;
658
659 if (ntype != otype) {
660 switch (otype) {
661 case NL80211_IFTYPE_ADHOC:
662 cfg80211_leave_ibss(rdev, dev, false);
663 break;
664 case NL80211_IFTYPE_STATION:
665 cfg80211_disconnect(rdev, dev,
666 WLAN_REASON_DEAUTH_LEAVING, true);
667 break;
668 case NL80211_IFTYPE_MESH_POINT:
669 /* mesh should be handled? */
670 break;
671 default:
672 break;
673 }
674
675 cfg80211_process_rdev_events(rdev);
676 }
677
678 err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev,
679 ntype, flags, params);
680
681 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
682
683 return err;
684}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index e4e90e249bab..429dd06a4ecc 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -70,18 +70,8 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
70 enum nl80211_iftype type; 70 enum nl80211_iftype type;
71 int ret; 71 int ret;
72 72
73 if (!wdev)
74 return -EOPNOTSUPP;
75
76 rdev = wiphy_to_dev(wdev->wiphy); 73 rdev = wiphy_to_dev(wdev->wiphy);
77 74
78 if (!rdev->ops->change_virtual_intf)
79 return -EOPNOTSUPP;
80
81 /* don't support changing VLANs, you just re-create them */
82 if (wdev->iftype == NL80211_IFTYPE_AP_VLAN)
83 return -EOPNOTSUPP;
84
85 switch (*mode) { 75 switch (*mode) {
86 case IW_MODE_INFRA: 76 case IW_MODE_INFRA:
87 type = NL80211_IFTYPE_STATION; 77 type = NL80211_IFTYPE_STATION;
@@ -104,9 +94,9 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
104 94
105 memset(&vifparams, 0, sizeof(vifparams)); 95 memset(&vifparams, 0, sizeof(vifparams));
106 96
107 ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev, type, 97 cfg80211_lock_rdev(rdev);
108 NULL, &vifparams); 98 ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
109 WARN_ON(!ret && wdev->iftype != type); 99 cfg80211_unlock_rdev(rdev);
110 100
111 return ret; 101 return ret;
112} 102}
@@ -267,39 +257,26 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
267 * @wiphy: the wiphy 257 * @wiphy: the wiphy
268 * @freq: the wext freq encoding 258 * @freq: the wext freq encoding
269 * 259 *
270 * Returns a channel, %NULL for auto, or an ERR_PTR for errors! 260 * Returns a frequency, or a negative error code, or 0 for auto.
271 */ 261 */
272struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy, 262int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
273 struct iw_freq *freq)
274{ 263{
275 struct ieee80211_channel *chan;
276 int f;
277
278 /* 264 /*
279 * Parse frequency - return NULL for auto and 265 * Parse frequency - return 0 for auto and
280 * -EINVAL for impossible things. 266 * -EINVAL for impossible things.
281 */ 267 */
282 if (freq->e == 0) { 268 if (freq->e == 0) {
283 if (freq->m < 0) 269 if (freq->m < 0)
284 return NULL; 270 return 0;
285 f = ieee80211_channel_to_frequency(freq->m); 271 return ieee80211_channel_to_frequency(freq->m);
286 } else { 272 } else {
287 int i, div = 1000000; 273 int i, div = 1000000;
288 for (i = 0; i < freq->e; i++) 274 for (i = 0; i < freq->e; i++)
289 div /= 10; 275 div /= 10;
290 if (div <= 0) 276 if (div <= 0)
291 return ERR_PTR(-EINVAL); 277 return -EINVAL;
292 f = freq->m / div; 278 return freq->m / div;
293 } 279 }
294
295 /*
296 * Look up channel struct and return -EINVAL when
297 * it cannot be found.
298 */
299 chan = ieee80211_get_channel(wiphy, f);
300 if (!chan)
301 return ERR_PTR(-EINVAL);
302 return chan;
303} 280}
304 281
305int cfg80211_wext_siwrts(struct net_device *dev, 282int cfg80211_wext_siwrts(struct net_device *dev,
@@ -761,30 +738,27 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
761 738
762int cfg80211_wext_siwfreq(struct net_device *dev, 739int cfg80211_wext_siwfreq(struct net_device *dev,
763 struct iw_request_info *info, 740 struct iw_request_info *info,
764 struct iw_freq *freq, char *extra) 741 struct iw_freq *wextfreq, char *extra)
765{ 742{
766 struct wireless_dev *wdev = dev->ieee80211_ptr; 743 struct wireless_dev *wdev = dev->ieee80211_ptr;
767 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 744 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
768 struct ieee80211_channel *chan; 745 int freq, err;
769 int err;
770 746
771 switch (wdev->iftype) { 747 switch (wdev->iftype) {
772 case NL80211_IFTYPE_STATION: 748 case NL80211_IFTYPE_STATION:
773 return cfg80211_mgd_wext_siwfreq(dev, info, freq, extra); 749 return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
774 case NL80211_IFTYPE_ADHOC: 750 case NL80211_IFTYPE_ADHOC:
775 return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra); 751 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
776 default: 752 default:
777 chan = cfg80211_wext_freq(wdev->wiphy, freq); 753 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
778 if (!chan) 754 if (freq < 0)
755 return freq;
756 if (freq == 0)
779 return -EINVAL; 757 return -EINVAL;
780 if (IS_ERR(chan)) 758 mutex_lock(&rdev->devlist_mtx);
781 return PTR_ERR(chan); 759 err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT);
782 err = rdev->ops->set_channel(wdev->wiphy, chan, 760 mutex_unlock(&rdev->devlist_mtx);
783 NL80211_CHAN_NO_HT); 761 return err;
784 if (err)
785 return err;
786 rdev->channel = chan;
787 return 0;
788 } 762 }
789} 763}
790EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq); 764EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 9a3774749589..20b3daef6964 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -42,8 +42,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
42 struct iw_request_info *info, 42 struct iw_request_info *info,
43 struct iw_point *data, char *ssid); 43 struct iw_point *data, char *ssid);
44 44
45struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy, 45int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
46 struct iw_freq *freq);
47 46
48 47
49extern const struct iw_handler_def cfg80211_wext_handler; 48extern const struct iw_handler_def cfg80211_wext_handler;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7bacbd1c2af6..d16cd9ea4d00 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -15,6 +15,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
15 struct wireless_dev *wdev) 15 struct wireless_dev *wdev)
16{ 16{
17 struct cfg80211_cached_keys *ck = NULL; 17 struct cfg80211_cached_keys *ck = NULL;
18 const u8 *prev_bssid = NULL;
18 int err, i; 19 int err, i;
19 20
20 ASSERT_RDEV_LOCK(rdev); 21 ASSERT_RDEV_LOCK(rdev);
@@ -42,8 +43,12 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
42 for (i = 0; i < 6; i++) 43 for (i = 0; i < 6; i++)
43 ck->params[i].key = ck->data[i]; 44 ck->params[i].key = ck->data[i];
44 } 45 }
46
47 if (wdev->wext.prev_bssid_valid)
48 prev_bssid = wdev->wext.prev_bssid;
49
45 err = __cfg80211_connect(rdev, wdev->netdev, 50 err = __cfg80211_connect(rdev, wdev->netdev,
46 &wdev->wext.connect, ck); 51 &wdev->wext.connect, ck, prev_bssid);
47 if (err) 52 if (err)
48 kfree(ck); 53 kfree(ck);
49 54
@@ -52,25 +57,31 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
52 57
53int cfg80211_mgd_wext_siwfreq(struct net_device *dev, 58int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
54 struct iw_request_info *info, 59 struct iw_request_info *info,
55 struct iw_freq *freq, char *extra) 60 struct iw_freq *wextfreq, char *extra)
56{ 61{
57 struct wireless_dev *wdev = dev->ieee80211_ptr; 62 struct wireless_dev *wdev = dev->ieee80211_ptr;
58 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 63 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
59 struct ieee80211_channel *chan; 64 struct ieee80211_channel *chan = NULL;
60 int err; 65 int err, freq;
61 66
62 /* call only for station! */ 67 /* call only for station! */
63 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) 68 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
64 return -EINVAL; 69 return -EINVAL;
65 70
66 chan = cfg80211_wext_freq(wdev->wiphy, freq); 71 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
67 if (chan && IS_ERR(chan)) 72 if (freq < 0)
68 return PTR_ERR(chan); 73 return freq;
69 74
70 if (chan && (chan->flags & IEEE80211_CHAN_DISABLED)) 75 if (freq) {
71 return -EINVAL; 76 chan = ieee80211_get_channel(wdev->wiphy, freq);
77 if (!chan)
78 return -EINVAL;
79 if (chan->flags & IEEE80211_CHAN_DISABLED)
80 return -EINVAL;
81 }
72 82
73 cfg80211_lock_rdev(rdev); 83 cfg80211_lock_rdev(rdev);
84 mutex_lock(&rdev->devlist_mtx);
74 wdev_lock(wdev); 85 wdev_lock(wdev);
75 86
76 if (wdev->sme_state != CFG80211_SME_IDLE) { 87 if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -84,9 +95,8 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
84 /* if SSID set, we'll try right again, avoid event */ 95 /* if SSID set, we'll try right again, avoid event */
85 if (wdev->wext.connect.ssid_len) 96 if (wdev->wext.connect.ssid_len)
86 event = false; 97 event = false;
87 err = __cfg80211_disconnect(wiphy_to_dev(wdev->wiphy), 98 err = __cfg80211_disconnect(rdev, dev,
88 dev, WLAN_REASON_DEAUTH_LEAVING, 99 WLAN_REASON_DEAUTH_LEAVING, event);
89 event);
90 if (err) 100 if (err)
91 goto out; 101 goto out;
92 } 102 }
@@ -95,17 +105,15 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
95 wdev->wext.connect.channel = chan; 105 wdev->wext.connect.channel = chan;
96 106
97 /* SSID is not set, we just want to switch channel */ 107 /* SSID is not set, we just want to switch channel */
98 if (wdev->wext.connect.ssid_len && chan) { 108 if (chan && !wdev->wext.connect.ssid_len) {
99 err = -EOPNOTSUPP; 109 err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
100 if (rdev->ops->set_channel)
101 err = rdev->ops->set_channel(wdev->wiphy, chan,
102 NL80211_CHAN_NO_HT);
103 goto out; 110 goto out;
104 } 111 }
105 112
106 err = cfg80211_mgd_wext_connect(wiphy_to_dev(wdev->wiphy), wdev); 113 err = cfg80211_mgd_wext_connect(rdev, wdev);
107 out: 114 out:
108 wdev_unlock(wdev); 115 wdev_unlock(wdev);
116 mutex_unlock(&rdev->devlist_mtx);
109 cfg80211_unlock_rdev(rdev); 117 cfg80211_unlock_rdev(rdev);
110 return err; 118 return err;
111} 119}
@@ -143,6 +151,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
143 struct iw_point *data, char *ssid) 151 struct iw_point *data, char *ssid)
144{ 152{
145 struct wireless_dev *wdev = dev->ieee80211_ptr; 153 struct wireless_dev *wdev = dev->ieee80211_ptr;
154 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
146 size_t len = data->length; 155 size_t len = data->length;
147 int err; 156 int err;
148 157
@@ -157,7 +166,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
157 if (len > 0 && ssid[len - 1] == '\0') 166 if (len > 0 && ssid[len - 1] == '\0')
158 len--; 167 len--;
159 168
160 cfg80211_lock_rdev(wiphy_to_dev(wdev->wiphy)); 169 cfg80211_lock_rdev(rdev);
170 mutex_lock(&rdev->devlist_mtx);
161 wdev_lock(wdev); 171 wdev_lock(wdev);
162 172
163 err = 0; 173 err = 0;
@@ -173,23 +183,24 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
173 /* if SSID set now, we'll try to connect, avoid event */ 183 /* if SSID set now, we'll try to connect, avoid event */
174 if (len) 184 if (len)
175 event = false; 185 event = false;
176 err = __cfg80211_disconnect(wiphy_to_dev(wdev->wiphy), 186 err = __cfg80211_disconnect(rdev, dev,
177 dev, WLAN_REASON_DEAUTH_LEAVING, 187 WLAN_REASON_DEAUTH_LEAVING, event);
178 event);
179 if (err) 188 if (err)
180 goto out; 189 goto out;
181 } 190 }
182 191
192 wdev->wext.prev_bssid_valid = false;
183 wdev->wext.connect.ssid = wdev->wext.ssid; 193 wdev->wext.connect.ssid = wdev->wext.ssid;
184 memcpy(wdev->wext.ssid, ssid, len); 194 memcpy(wdev->wext.ssid, ssid, len);
185 wdev->wext.connect.ssid_len = len; 195 wdev->wext.connect.ssid_len = len;
186 196
187 wdev->wext.connect.crypto.control_port = false; 197 wdev->wext.connect.crypto.control_port = false;
188 198
189 err = cfg80211_mgd_wext_connect(wiphy_to_dev(wdev->wiphy), wdev); 199 err = cfg80211_mgd_wext_connect(rdev, wdev);
190 out: 200 out:
191 wdev_unlock(wdev); 201 wdev_unlock(wdev);
192 cfg80211_unlock_rdev(wiphy_to_dev(wdev->wiphy)); 202 mutex_unlock(&rdev->devlist_mtx);
203 cfg80211_unlock_rdev(rdev);
193 return err; 204 return err;
194} 205}
195 206
@@ -206,7 +217,15 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
206 data->flags = 0; 217 data->flags = 0;
207 218
208 wdev_lock(wdev); 219 wdev_lock(wdev);
209 if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { 220 if (wdev->current_bss) {
221 const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
222 WLAN_EID_SSID);
223 if (ie) {
224 data->flags = 1;
225 data->length = ie[1];
226 memcpy(ssid, ie + 2, data->length);
227 }
228 } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
210 data->flags = 1; 229 data->flags = 1;
211 data->length = wdev->wext.connect.ssid_len; 230 data->length = wdev->wext.connect.ssid_len;
212 memcpy(ssid, wdev->wext.connect.ssid, data->length); 231 memcpy(ssid, wdev->wext.connect.ssid, data->length);
@@ -222,6 +241,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
222 struct sockaddr *ap_addr, char *extra) 241 struct sockaddr *ap_addr, char *extra)
223{ 242{
224 struct wireless_dev *wdev = dev->ieee80211_ptr; 243 struct wireless_dev *wdev = dev->ieee80211_ptr;
244 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
225 u8 *bssid = ap_addr->sa_data; 245 u8 *bssid = ap_addr->sa_data;
226 int err; 246 int err;
227 247
@@ -236,7 +256,8 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
236 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) 256 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
237 bssid = NULL; 257 bssid = NULL;
238 258
239 cfg80211_lock_rdev(wiphy_to_dev(wdev->wiphy)); 259 cfg80211_lock_rdev(rdev);
260 mutex_lock(&rdev->devlist_mtx);
240 wdev_lock(wdev); 261 wdev_lock(wdev);
241 262
242 if (wdev->sme_state != CFG80211_SME_IDLE) { 263 if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -250,9 +271,8 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
250 compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) 271 compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
251 goto out; 272 goto out;
252 273
253 err = __cfg80211_disconnect(wiphy_to_dev(wdev->wiphy), 274 err = __cfg80211_disconnect(rdev, dev,
254 dev, WLAN_REASON_DEAUTH_LEAVING, 275 WLAN_REASON_DEAUTH_LEAVING, false);
255 false);
256 if (err) 276 if (err)
257 goto out; 277 goto out;
258 } 278 }
@@ -263,10 +283,11 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
263 } else 283 } else
264 wdev->wext.connect.bssid = NULL; 284 wdev->wext.connect.bssid = NULL;
265 285
266 err = cfg80211_mgd_wext_connect(wiphy_to_dev(wdev->wiphy), wdev); 286 err = cfg80211_mgd_wext_connect(rdev, wdev);
267 out: 287 out:
268 wdev_unlock(wdev); 288 wdev_unlock(wdev);
269 cfg80211_unlock_rdev(wiphy_to_dev(wdev->wiphy)); 289 mutex_unlock(&rdev->devlist_mtx);
290 cfg80211_unlock_rdev(rdev);
270 return err; 291 return err;
271} 292}
272 293
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 21cdc872004e..5e6c072c64d3 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -40,6 +40,7 @@
40#include <linux/errno.h> 40#include <linux/errno.h>
41#include <linux/kernel.h> 41#include <linux/kernel.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/smp_lock.h>
43#include <linux/timer.h> 44#include <linux/timer.h>
44#include <linux/string.h> 45#include <linux/string.h>
45#include <linux/net.h> 46#include <linux/net.h>
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index d401dc8f05ed..e5195c99f71e 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,7 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
16 16
17static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) 17static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
18{ 18{
19 return ntohl(daddr->a4 ^ saddr->a4); 19 return ntohl(daddr->a4 + saddr->a4);
20} 20}
21 21
22static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) 22static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index a2adb51849a9..fef8db553e8d 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -60,7 +60,7 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file)
60 return single_open_net(inode, file, xfrm_statistics_seq_show); 60 return single_open_net(inode, file, xfrm_statistics_seq_show);
61} 61}
62 62
63static struct file_operations xfrm_statistics_seq_fops = { 63static const struct file_operations xfrm_statistics_seq_fops = {
64 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
65 .open = xfrm_statistics_seq_open, 65 .open = xfrm_statistics_seq_open,
66 .read = seq_read, 66 .read = seq_read,