summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/802/fddi.c4
-rw-r--r--net/802/hippi.c5
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/8021q/vlan_dev.c49
-rw-r--r--net/8021q/vlanproc.c3
-rw-r--r--net/appletalk/dev.c11
-rw-r--r--net/atm/br2684.c26
-rw-r--r--net/atm/clip.c28
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/br_sysfs_br.c3
-rw-r--r--net/bridge/br_sysfs_if.c3
-rw-r--r--net/core/datagram.c116
-rw-r--r--net/core/dev.c650
-rw-r--r--net/core/drop_monitor.c137
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/gen_estimator.c4
-rw-r--r--net/core/iovec.c33
-rw-r--r--net/core/net-sysfs.c9
-rw-r--r--net/core/net-traces.c4
-rw-r--r--net/core/net_namespace.c54
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/core/pktgen.c1
-rw-r--r--net/core/skbuff.c68
-rw-r--r--net/core/sock.c103
-rw-r--r--net/core/stream.c3
-rw-r--r--net/decnet/af_decnet.c19
-rw-r--r--net/decnet/dn_nsp_in.c17
-rw-r--r--net/decnet/dn_nsp_out.c8
-rw-r--r--net/decnet/dn_rules.c4
-rw-r--r--net/dsa/slave.c10
-rw-r--r--net/econet/af_econet.c18
-rw-r--r--net/ethernet/eth.c5
-rw-r--r--net/ipv4/af_inet.c24
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/fib_hash.c1
-rw-r--r--net/ipv4/fib_lookup.h3
-rw-r--r--net/ipv4/fib_rules.c4
-rw-r--r--net/ipv4/fib_semantics.c3
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c23
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_input.c13
-rw-r--r--net/ipv4/ip_output.c12
-rw-r--r--net/ipv4/ip_sockglue.c84
-rw-r--r--net/ipv4/ipconfig.c41
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/proc.c10
-rw-r--r--net/ipv4/syncookies.c5
-rw-r--r--net/ipv4/tcp.c47
-rw-r--r--net/ipv4/tcp_input.c100
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv6/addrconf.c87
-rw-r--r--net/ipv6/af_inet6.c35
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/ip6_input.c7
-rw-r--r--net/ipv6/ip6_output.c9
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/mcast.c19
-rw-r--r--net/ipv6/ndisc.c13
-rw-r--r--net/ipv6/proc.c10
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/sit.c97
-rw-r--r--net/ipv6/syncookies.c4
-rw-r--r--net/ipv6/tcp_ipv6.c9
-rw-r--r--net/irda/irlap_frame.c18
-rw-r--r--net/irda/irnetlink.c19
-rw-r--r--net/iucv/af_iucv.c408
-rw-r--r--net/iucv/iucv.c43
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_conn.c4
-rw-r--r--net/mac80211/Kconfig16
-rw-r--r--net/mac80211/agg-rx.c19
-rw-r--r--net/mac80211/agg-tx.c13
-rw-r--r--net/mac80211/cfg.c144
-rw-r--r--net/mac80211/debugfs.c74
-rw-r--r--net/mac80211/driver-ops.h184
-rw-r--r--net/mac80211/event.c17
-rw-r--r--net/mac80211/ht.c84
-rw-r--r--net/mac80211/ibss.c501
-rw-r--r--net/mac80211/ieee80211_i.h151
-rw-r--r--net/mac80211/iface.c113
-rw-r--r--net/mac80211/key.c29
-rw-r--r--net/mac80211/key.h3
-rw-r--r--net/mac80211/main.c315
-rw-r--r--net/mac80211/mesh.c46
-rw-r--r--net/mac80211/mesh.h16
-rw-r--r--net/mac80211/mesh_hwmp.c8
-rw-r--r--net/mac80211/mesh_plink.c21
-rw-r--r--net/mac80211/mlme.c749
-rw-r--r--net/mac80211/pm.c182
-rw-r--r--net/mac80211/rc80211_minstrel.c8
-rw-r--r--net/mac80211/rc80211_pid_algo.c8
-rw-r--r--net/mac80211/rx.c292
-rw-r--r--net/mac80211/scan.c436
-rw-r--r--net/mac80211/spectmgmt.c103
-rw-r--r--net/mac80211/sta_info.c103
-rw-r--r--net/mac80211/sta_info.h7
-rw-r--r--net/mac80211/tkip.c6
-rw-r--r--net/mac80211/tx.c51
-rw-r--r--net/mac80211/util.c405
-rw-r--r--net/mac80211/wext.c557
-rw-r--r--net/mac80211/wme.c32
-rw-r--r--net/mac80211/wpa.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c18
-rw-r--r--net/netlabel/netlabel_cipso_v4.c16
-rw-r--r--net/netlabel/netlabel_mgmt.c16
-rw-r--r--net/netlabel/netlabel_unlabeled.c16
-rw-r--r--net/netlink/genetlink.c46
-rw-r--r--net/packet/af_packet.c598
-rw-r--r--net/phonet/pep-gprs.c5
-rw-r--r--net/rds/af_rds.c1
-rw-r--r--net/rds/connection.c4
-rw-r--r--net/rds/ib.c4
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_recv.c2
-rw-r--r--net/rds/ib_ring.c2
-rw-r--r--net/rds/ib_send.c10
-rw-r--r--net/rds/info.c5
-rw-r--r--net/rds/iw.c4
-rw-r--r--net/rds/iw.h2
-rw-r--r--net/rds/iw_recv.c2
-rw-r--r--net/rds/iw_ring.c2
-rw-r--r--net/rds/iw_send.c10
-rw-r--r--net/rds/rdma.c7
-rw-r--r--net/rds/rdma_transport.c12
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/send.c10
-rw-r--r--net/rfkill/rfkill-input.c69
-rw-r--r--net/rfkill/rfkill.c59
-rw-r--r--net/sched/cls_cgroup.c6
-rw-r--r--net/sched/sch_generic.c40
-rw-r--r--net/sched/sch_teql.c18
-rw-r--r--net/sctp/output.c17
-rw-r--r--net/tipc/eth_media.c2
-rw-r--r--net/tipc/netlink.c38
-rw-r--r--net/wimax/Makefile1
-rw-r--r--net/wimax/debug-levels.h1
-rw-r--r--net/wimax/debugfs.c1
-rw-r--r--net/wimax/op-msg.c17
-rw-r--r--net/wimax/op-rfkill.c9
-rw-r--r--net/wimax/op-state-get.c86
-rw-r--r--net/wimax/stack.c5
-rw-r--r--net/wireless/Kconfig8
-rw-r--r--net/wireless/Makefile3
-rw-r--r--net/wireless/core.c40
-rw-r--r--net/wireless/core.h35
-rw-r--r--net/wireless/debugfs.c131
-rw-r--r--net/wireless/debugfs.h14
-rw-r--r--net/wireless/ibss.c369
-rw-r--r--net/wireless/mlme.c50
-rw-r--r--net/wireless/nl80211.c870
-rw-r--r--net/wireless/nl80211.h32
-rw-r--r--net/wireless/reg.c259
-rw-r--r--net/wireless/scan.c63
-rw-r--r--net/wireless/util.c371
-rw-r--r--net/wireless/wext-compat.c517
-rw-r--r--net/wireless/wext.c48
162 files changed, 7705 insertions, 3564 deletions
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 539e6064e6d4..3ef0ab0a543a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -185,10 +185,6 @@ static const struct header_ops fddi_header_ops = {
185static void fddi_setup(struct net_device *dev) 185static void fddi_setup(struct net_device *dev)
186{ 186{
187 dev->header_ops = &fddi_header_ops; 187 dev->header_ops = &fddi_header_ops;
188#ifdef CONFIG_COMPAT_NET_DEV_OPS
189 dev->change_mtu = fddi_change_mtu,
190#endif
191
192 dev->type = ARPHRD_FDDI; 188 dev->type = ARPHRD_FDDI;
193 dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */ 189 dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
194 dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */ 190 dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 313b9ebf92ee..cd3e8e929529 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -193,11 +193,6 @@ static const struct header_ops hippi_header_ops = {
193 193
194static void hippi_setup(struct net_device *dev) 194static void hippi_setup(struct net_device *dev)
195{ 195{
196#ifdef CONFIG_COMPAT_NET_DEV_OPS
197 dev->change_mtu = hippi_change_mtu;
198 dev->set_mac_address = hippi_mac_addr;
199 dev->neigh_setup = hippi_neigh_setup_dev;
200#endif
201 dev->header_ops = &hippi_header_ops; 196 dev->header_ops = &hippi_header_ops;
202 197
203 /* 198 /*
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d1e10546eb85..714e1c3536be 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,13 +378,13 @@ static void vlan_sync_address(struct net_device *dev,
378 * the new address */ 378 * the new address */
379 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 379 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
380 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 380 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
381 dev_unicast_delete(dev, vlandev->dev_addr, ETH_ALEN); 381 dev_unicast_delete(dev, vlandev->dev_addr);
382 382
383 /* vlan address was equal to the old address and is different from 383 /* vlan address was equal to the old address and is different from
384 * the new address */ 384 * the new address */
385 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 385 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
386 compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 386 compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
387 dev_unicast_add(dev, vlandev->dev_addr, ETH_ALEN); 387 dev_unicast_add(dev, vlandev->dev_addr);
388 388
389 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); 389 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
390} 390}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index c67fe6f75653..7f7de1a04de6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,9 +114,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
114EXPORT_SYMBOL(vlan_gro_receive); 114EXPORT_SYMBOL(vlan_gro_receive);
115 115
116int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, 116int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
117 unsigned int vlan_tci, struct napi_gro_fraginfo *info) 117 unsigned int vlan_tci)
118{ 118{
119 struct sk_buff *skb = napi_fraginfo_skb(napi, info); 119 struct sk_buff *skb = napi_frags_skb(napi);
120 120
121 if (!skb) 121 if (!skb)
122 return NET_RX_DROP; 122 return NET_RX_DROP;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b4b9068e55a7..96bad8f233e2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -290,7 +290,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
290 290
291static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 291static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
292{ 292{
293 struct net_device_stats *stats = &dev->stats; 293 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
294 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 294 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
295 295
296 /* Handle non-VLAN frames if they are sent to us, for example by DHCP. 296 /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
@@ -309,7 +309,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
309 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 309 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
310 skb = __vlan_put_tag(skb, vlan_tci); 310 skb = __vlan_put_tag(skb, vlan_tci);
311 if (!skb) { 311 if (!skb) {
312 stats->tx_dropped++; 312 txq->tx_dropped++;
313 return NETDEV_TX_OK; 313 return NETDEV_TX_OK;
314 } 314 }
315 315
@@ -317,8 +317,8 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
317 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++; 317 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
318 } 318 }
319 319
320 stats->tx_packets++; 320 txq->tx_packets++;
321 stats->tx_bytes += skb->len; 321 txq->tx_bytes += skb->len;
322 322
323 skb->dev = vlan_dev_info(dev)->real_dev; 323 skb->dev = vlan_dev_info(dev)->real_dev;
324 dev_queue_xmit(skb); 324 dev_queue_xmit(skb);
@@ -328,15 +328,15 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
328static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, 328static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
329 struct net_device *dev) 329 struct net_device *dev)
330{ 330{
331 struct net_device_stats *stats = &dev->stats; 331 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
332 u16 vlan_tci; 332 u16 vlan_tci;
333 333
334 vlan_tci = vlan_dev_info(dev)->vlan_id; 334 vlan_tci = vlan_dev_info(dev)->vlan_id;
335 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 335 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
336 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 336 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
337 337
338 stats->tx_packets++; 338 txq->tx_packets++;
339 stats->tx_bytes += skb->len; 339 txq->tx_bytes += skb->len;
340 340
341 skb->dev = vlan_dev_info(dev)->real_dev; 341 skb->dev = vlan_dev_info(dev)->real_dev;
342 dev_queue_xmit(skb); 342 dev_queue_xmit(skb);
@@ -441,7 +441,7 @@ static int vlan_dev_open(struct net_device *dev)
441 return -ENETDOWN; 441 return -ENETDOWN;
442 442
443 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { 443 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
444 err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN); 444 err = dev_unicast_add(real_dev, dev->dev_addr);
445 if (err < 0) 445 if (err < 0)
446 goto out; 446 goto out;
447 } 447 }
@@ -470,7 +470,7 @@ clear_allmulti:
470 dev_set_allmulti(real_dev, -1); 470 dev_set_allmulti(real_dev, -1);
471del_unicast: 471del_unicast:
472 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 472 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
473 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN); 473 dev_unicast_delete(real_dev, dev->dev_addr);
474out: 474out:
475 netif_carrier_off(dev); 475 netif_carrier_off(dev);
476 return err; 476 return err;
@@ -492,7 +492,7 @@ static int vlan_dev_stop(struct net_device *dev)
492 dev_set_promiscuity(real_dev, -1); 492 dev_set_promiscuity(real_dev, -1);
493 493
494 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 494 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
495 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len); 495 dev_unicast_delete(real_dev, dev->dev_addr);
496 496
497 netif_carrier_off(dev); 497 netif_carrier_off(dev);
498 return 0; 498 return 0;
@@ -511,13 +511,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
511 goto out; 511 goto out;
512 512
513 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { 513 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
514 err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN); 514 err = dev_unicast_add(real_dev, addr->sa_data);
515 if (err < 0) 515 if (err < 0)
516 return err; 516 return err;
517 } 517 }
518 518
519 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 519 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
520 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN); 520 dev_unicast_delete(real_dev, dev->dev_addr);
521 521
522out: 522out:
523 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 523 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -644,7 +644,6 @@ static int vlan_dev_init(struct net_device *dev)
644 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 644 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
645 dev->netdev_ops = &vlan_netdev_ops; 645 dev->netdev_ops = &vlan_netdev_ops;
646 } 646 }
647 netdev_resync_ops(dev);
648 647
649 if (is_vlan_dev(real_dev)) 648 if (is_vlan_dev(real_dev))
650 subclass = 1; 649 subclass = 1;
@@ -671,13 +670,7 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
671 struct ethtool_cmd *cmd) 670 struct ethtool_cmd *cmd)
672{ 671{
673 const struct vlan_dev_info *vlan = vlan_dev_info(dev); 672 const struct vlan_dev_info *vlan = vlan_dev_info(dev);
674 struct net_device *real_dev = vlan->real_dev; 673 return dev_ethtool_get_settings(vlan->real_dev, cmd);
675
676 if (!real_dev->ethtool_ops ||
677 !real_dev->ethtool_ops->get_settings)
678 return -EOPNOTSUPP;
679
680 return real_dev->ethtool_ops->get_settings(real_dev, cmd);
681} 674}
682 675
683static void vlan_ethtool_get_drvinfo(struct net_device *dev, 676static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -691,24 +684,13 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
691static u32 vlan_ethtool_get_rx_csum(struct net_device *dev) 684static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
692{ 685{
693 const struct vlan_dev_info *vlan = vlan_dev_info(dev); 686 const struct vlan_dev_info *vlan = vlan_dev_info(dev);
694 struct net_device *real_dev = vlan->real_dev; 687 return dev_ethtool_get_rx_csum(vlan->real_dev);
695
696 if (real_dev->ethtool_ops == NULL ||
697 real_dev->ethtool_ops->get_rx_csum == NULL)
698 return 0;
699 return real_dev->ethtool_ops->get_rx_csum(real_dev);
700} 688}
701 689
702static u32 vlan_ethtool_get_flags(struct net_device *dev) 690static u32 vlan_ethtool_get_flags(struct net_device *dev)
703{ 691{
704 const struct vlan_dev_info *vlan = vlan_dev_info(dev); 692 const struct vlan_dev_info *vlan = vlan_dev_info(dev);
705 struct net_device *real_dev = vlan->real_dev; 693 return dev_ethtool_get_flags(vlan->real_dev);
706
707 if (!(real_dev->features & NETIF_F_HW_VLAN_RX) ||
708 real_dev->ethtool_ops == NULL ||
709 real_dev->ethtool_ops->get_flags == NULL)
710 return 0;
711 return real_dev->ethtool_ops->get_flags(real_dev);
712} 694}
713 695
714static const struct ethtool_ops vlan_ethtool_ops = { 696static const struct ethtool_ops vlan_ethtool_ops = {
@@ -756,6 +738,7 @@ void vlan_setup(struct net_device *dev)
756 ether_setup(dev); 738 ether_setup(dev);
757 739
758 dev->priv_flags |= IFF_802_1Q_VLAN; 740 dev->priv_flags |= IFF_802_1Q_VLAN;
741 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
759 dev->tx_queue_len = 0; 742 dev->tx_queue_len = 0;
760 743
761 dev->netdev_ops = &vlan_netdev_ops; 744 dev->netdev_ops = &vlan_netdev_ops;
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 3628e0a81b40..b55a091a33df 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -279,13 +279,14 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
279{ 279{
280 struct net_device *vlandev = (struct net_device *) seq->private; 280 struct net_device *vlandev = (struct net_device *) seq->private;
281 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); 281 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
282 struct net_device_stats *stats = &vlandev->stats; 282 const struct net_device_stats *stats;
283 static const char fmt[] = "%30s %12lu\n"; 283 static const char fmt[] = "%30s %12lu\n";
284 int i; 284 int i;
285 285
286 if (!is_vlan_dev(vlandev)) 286 if (!is_vlan_dev(vlandev))
287 return 0; 287 return 0;
288 288
289 stats = dev_get_stats(vlandev);
289 seq_printf(seq, 290 seq_printf(seq,
290 "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", 291 "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
291 vlandev->name, dev_info->vlan_id, 292 vlandev->name, dev_info->vlan_id,
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index 72277d70c980..6c8016f61866 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -9,21 +9,10 @@
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/if_ltalk.h> 10#include <linux/if_ltalk.h>
11 11
12#ifdef CONFIG_COMPAT_NET_DEV_OPS
13static int ltalk_change_mtu(struct net_device *dev, int mtu)
14{
15 return -EINVAL;
16}
17#endif
18
19static void ltalk_setup(struct net_device *dev) 12static void ltalk_setup(struct net_device *dev)
20{ 13{
21 /* Fill in the fields of the device structure with localtalk-generic values. */ 14 /* Fill in the fields of the device structure with localtalk-generic values. */
22 15
23#ifdef CONFIG_COMPAT_NET_DEV_OPS
24 dev->change_mtu = ltalk_change_mtu;
25#endif
26
27 dev->type = ARPHRD_LOCALTLK; 16 dev->type = ARPHRD_LOCALTLK;
28 dev->hard_header_len = LTALK_HLEN; 17 dev->hard_header_len = LTALK_HLEN;
29 dev->mtu = LTALK_MTU; 18 dev->mtu = LTALK_MTU;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 3100a8940afc..bfa8fa9894fc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -445,9 +445,10 @@ free_skb:
445 */ 445 */
446static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) 446static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
447{ 447{
448 struct sk_buff_head queue;
448 int err; 449 int err;
449 struct br2684_vcc *brvcc; 450 struct br2684_vcc *brvcc;
450 struct sk_buff *skb; 451 struct sk_buff *skb, *tmp;
451 struct sk_buff_head *rq; 452 struct sk_buff_head *rq;
452 struct br2684_dev *brdev; 453 struct br2684_dev *brdev;
453 struct net_device *net_dev; 454 struct net_device *net_dev;
@@ -505,29 +506,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
505 barrier(); 506 barrier();
506 atmvcc->push = br2684_push; 507 atmvcc->push = br2684_push;
507 508
509 __skb_queue_head_init(&queue);
508 rq = &sk_atm(atmvcc)->sk_receive_queue; 510 rq = &sk_atm(atmvcc)->sk_receive_queue;
509 511
510 spin_lock_irqsave(&rq->lock, flags); 512 spin_lock_irqsave(&rq->lock, flags);
511 if (skb_queue_empty(rq)) { 513 skb_queue_splice_init(rq, &queue);
512 skb = NULL;
513 } else {
514 /* NULL terminate the list. */
515 rq->prev->next = NULL;
516 skb = rq->next;
517 }
518 rq->prev = rq->next = (struct sk_buff *)rq;
519 rq->qlen = 0;
520 spin_unlock_irqrestore(&rq->lock, flags); 514 spin_unlock_irqrestore(&rq->lock, flags);
521 515
522 while (skb) { 516 skb_queue_walk_safe(&queue, skb, tmp) {
523 struct sk_buff *next = skb->next; 517 struct net_device *dev = skb->dev;
524 518
525 skb->next = skb->prev = NULL; 519 dev->stats.rx_bytes -= skb->len;
526 br2684_push(atmvcc, skb); 520 dev->stats.rx_packets--;
527 skb->dev->stats.rx_bytes -= skb->len;
528 skb->dev->stats.rx_packets--;
529 521
530 skb = next; 522 br2684_push(atmvcc, skb);
531 } 523 }
532 __module_get(THIS_MODULE); 524 __module_get(THIS_MODULE);
533 return 0; 525 return 0;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 3dc0a3a42a57..fb7623c080f8 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -445,9 +445,9 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
445 445
446static int clip_mkip(struct atm_vcc *vcc, int timeout) 446static int clip_mkip(struct atm_vcc *vcc, int timeout)
447{ 447{
448 struct sk_buff_head *rq, queue;
448 struct clip_vcc *clip_vcc; 449 struct clip_vcc *clip_vcc;
449 struct sk_buff *skb; 450 struct sk_buff *skb, *tmp;
450 struct sk_buff_head *rq;
451 unsigned long flags; 451 unsigned long flags;
452 452
453 if (!vcc->push) 453 if (!vcc->push)
@@ -469,39 +469,28 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
469 vcc->push = clip_push; 469 vcc->push = clip_push;
470 vcc->pop = clip_pop; 470 vcc->pop = clip_pop;
471 471
472 __skb_queue_head_init(&queue);
472 rq = &sk_atm(vcc)->sk_receive_queue; 473 rq = &sk_atm(vcc)->sk_receive_queue;
473 474
474 spin_lock_irqsave(&rq->lock, flags); 475 spin_lock_irqsave(&rq->lock, flags);
475 if (skb_queue_empty(rq)) { 476 skb_queue_splice_init(rq, &queue);
476 skb = NULL;
477 } else {
478 /* NULL terminate the list. */
479 rq->prev->next = NULL;
480 skb = rq->next;
481 }
482 rq->prev = rq->next = (struct sk_buff *)rq;
483 rq->qlen = 0;
484 spin_unlock_irqrestore(&rq->lock, flags); 477 spin_unlock_irqrestore(&rq->lock, flags);
485 478
486 /* re-process everything received between connection setup and MKIP */ 479 /* re-process everything received between connection setup and MKIP */
487 while (skb) { 480 skb_queue_walk_safe(&queue, skb, tmp) {
488 struct sk_buff *next = skb->next;
489
490 skb->next = skb->prev = NULL;
491 if (!clip_devs) { 481 if (!clip_devs) {
492 atm_return(vcc, skb->truesize); 482 atm_return(vcc, skb->truesize);
493 kfree_skb(skb); 483 kfree_skb(skb);
494 } else { 484 } else {
485 struct net_device *dev = skb->dev;
495 unsigned int len = skb->len; 486 unsigned int len = skb->len;
496 487
497 skb_get(skb); 488 skb_get(skb);
498 clip_push(vcc, skb); 489 clip_push(vcc, skb);
499 skb->dev->stats.rx_packets--; 490 dev->stats.rx_packets--;
500 skb->dev->stats.rx_bytes -= len; 491 dev->stats.rx_bytes -= len;
501 kfree_skb(skb); 492 kfree_skb(skb);
502 } 493 }
503
504 skb = next;
505 } 494 }
506 return 0; 495 return 0;
507} 496}
@@ -568,6 +557,7 @@ static void clip_setup(struct net_device *dev)
568 /* without any more elaborate queuing. 100 is a reasonable */ 557 /* without any more elaborate queuing. 100 is a reasonable */
569 /* compromise between decent burst-tolerance and protection */ 558 /* compromise between decent burst-tolerance and protection */
570 /* against memory hogs. */ 559 /* against memory hogs. */
560 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
571} 561}
572 562
573static int clip_create(int number) 563static int clip_create(int number)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a48f5efdb6bf..cb3e97b93aeb 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -398,7 +398,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
398 if (unlikely(fdb->is_local)) { 398 if (unlikely(fdb->is_local)) {
399 if (net_ratelimit()) 399 if (net_ratelimit())
400 printk(KERN_WARNING "%s: received packet with " 400 printk(KERN_WARNING "%s: received packet with "
401 " own address as source address\n", 401 "own address as source address\n",
402 source->dev->name); 402 source->dev->name);
403 } else { 403 } else {
404 /* fastpath: update of existing entry */ 404 /* fastpath: update of existing entry */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 603d89248e71..ee4820aa1843 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -172,7 +172,8 @@ static ssize_t store_stp_state(struct device *d,
172 if (endp == buf) 172 if (endp == buf)
173 return -EINVAL; 173 return -EINVAL;
174 174
175 rtnl_lock(); 175 if (!rtnl_trylock())
176 return restart_syscall();
176 br_stp_set_enabled(br, val); 177 br_stp_set_enabled(br, val);
177 rtnl_unlock(); 178 rtnl_unlock();
178 179
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 02b2d50cce4d..4a3cdf8f3813 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -189,7 +189,8 @@ static ssize_t brport_store(struct kobject * kobj,
189 189
190 val = simple_strtoul(buf, &endp, 0); 190 val = simple_strtoul(buf, &endp, 0);
191 if (endp != buf) { 191 if (endp != buf) {
192 rtnl_lock(); 192 if (!rtnl_trylock())
193 return restart_syscall();
193 if (p->dev && p->br && brport_attr->store) { 194 if (p->dev && p->br && brport_attr->store) {
194 spin_lock_bh(&p->br->lock); 195 spin_lock_bh(&p->br->lock);
195 ret = brport_attr->store(p, val); 196 ret = brport_attr->store(p, val);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b01a76abe1d2..e2a36f05cdf7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -260,7 +260,9 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
260 spin_unlock_bh(&sk->sk_receive_queue.lock); 260 spin_unlock_bh(&sk->sk_receive_queue.lock);
261 } 261 }
262 262
263 skb_free_datagram(sk, skb); 263 kfree_skb(skb);
264 sk_mem_reclaim_partial(sk);
265
264 return err; 266 return err;
265} 267}
266 268
@@ -351,17 +353,111 @@ fault:
351} 353}
352 354
353/** 355/**
356 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
357 * @skb: buffer to copy
358 * @offset: offset in the buffer to start copying from
359 * @to: io vector to copy to
360 * @to_offset: offset in the io vector to start copying to
361 * @len: amount of data to copy from buffer to iovec
362 *
363 * Returns 0 or -EFAULT.
364 * Note: the iovec is not modified during the copy.
365 */
366int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
367 const struct iovec *to, int to_offset,
368 int len)
369{
370 int start = skb_headlen(skb);
371 int i, copy = start - offset;
372
373 /* Copy header. */
374 if (copy > 0) {
375 if (copy > len)
376 copy = len;
377 if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
378 goto fault;
379 if ((len -= copy) == 0)
380 return 0;
381 offset += copy;
382 to_offset += copy;
383 }
384
385 /* Copy paged appendix. Hmm... why does this look so complicated? */
386 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
387 int end;
388
389 WARN_ON(start > offset + len);
390
391 end = start + skb_shinfo(skb)->frags[i].size;
392 if ((copy = end - offset) > 0) {
393 int err;
394 u8 *vaddr;
395 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
396 struct page *page = frag->page;
397
398 if (copy > len)
399 copy = len;
400 vaddr = kmap(page);
401 err = memcpy_toiovecend(to, vaddr + frag->page_offset +
402 offset - start, to_offset, copy);
403 kunmap(page);
404 if (err)
405 goto fault;
406 if (!(len -= copy))
407 return 0;
408 offset += copy;
409 to_offset += copy;
410 }
411 start = end;
412 }
413
414 if (skb_shinfo(skb)->frag_list) {
415 struct sk_buff *list = skb_shinfo(skb)->frag_list;
416
417 for (; list; list = list->next) {
418 int end;
419
420 WARN_ON(start > offset + len);
421
422 end = start + list->len;
423 if ((copy = end - offset) > 0) {
424 if (copy > len)
425 copy = len;
426 if (skb_copy_datagram_const_iovec(list,
427 offset - start,
428 to, to_offset,
429 copy))
430 goto fault;
431 if ((len -= copy) == 0)
432 return 0;
433 offset += copy;
434 to_offset += copy;
435 }
436 start = end;
437 }
438 }
439 if (!len)
440 return 0;
441
442fault:
443 return -EFAULT;
444}
445EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
446
447/**
354 * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. 448 * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
355 * @skb: buffer to copy 449 * @skb: buffer to copy
356 * @offset: offset in the buffer to start copying to 450 * @offset: offset in the buffer to start copying to
357 * @from: io vector to copy to 451 * @from: io vector to copy to
452 * @from_offset: offset in the io vector to start copying from
358 * @len: amount of data to copy to buffer from iovec 453 * @len: amount of data to copy to buffer from iovec
359 * 454 *
360 * Returns 0 or -EFAULT. 455 * Returns 0 or -EFAULT.
361 * Note: the iovec is modified during the copy. 456 * Note: the iovec is not modified during the copy.
362 */ 457 */
363int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, 458int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
364 struct iovec *from, int len) 459 const struct iovec *from, int from_offset,
460 int len)
365{ 461{
366 int start = skb_headlen(skb); 462 int start = skb_headlen(skb);
367 int i, copy = start - offset; 463 int i, copy = start - offset;
@@ -370,11 +466,12 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
370 if (copy > 0) { 466 if (copy > 0) {
371 if (copy > len) 467 if (copy > len)
372 copy = len; 468 copy = len;
373 if (memcpy_fromiovec(skb->data + offset, from, copy)) 469 if (memcpy_fromiovecend(skb->data + offset, from, 0, copy))
374 goto fault; 470 goto fault;
375 if ((len -= copy) == 0) 471 if ((len -= copy) == 0)
376 return 0; 472 return 0;
377 offset += copy; 473 offset += copy;
474 from_offset += copy;
378 } 475 }
379 476
380 /* Copy paged appendix. Hmm... why does this look so complicated? */ 477 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -393,8 +490,9 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
393 if (copy > len) 490 if (copy > len)
394 copy = len; 491 copy = len;
395 vaddr = kmap(page); 492 vaddr = kmap(page);
396 err = memcpy_fromiovec(vaddr + frag->page_offset + 493 err = memcpy_fromiovecend(vaddr + frag->page_offset +
397 offset - start, from, copy); 494 offset - start,
495 from, from_offset, copy);
398 kunmap(page); 496 kunmap(page);
399 if (err) 497 if (err)
400 goto fault; 498 goto fault;
@@ -402,6 +500,7 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
402 if (!(len -= copy)) 500 if (!(len -= copy))
403 return 0; 501 return 0;
404 offset += copy; 502 offset += copy;
503 from_offset += copy;
405 } 504 }
406 start = end; 505 start = end;
407 } 506 }
@@ -420,11 +519,14 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
420 copy = len; 519 copy = len;
421 if (skb_copy_datagram_from_iovec(list, 520 if (skb_copy_datagram_from_iovec(list,
422 offset - start, 521 offset - start,
423 from, copy)) 522 from,
523 from_offset,
524 copy))
424 goto fault; 525 goto fault;
425 if ((len -= copy) == 0) 526 if ((len -= copy) == 0)
426 return 0; 527 return 0;
427 offset += copy; 528 offset += copy;
529 from_offset += copy;
428 } 530 }
429 start = end; 531 start = end;
430 } 532 }
diff --git a/net/core/dev.c b/net/core/dev.c
index e2e9e4af3ace..e2fcc5f10177 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -126,6 +126,7 @@
126#include <linux/in.h> 126#include <linux/in.h>
127#include <linux/jhash.h> 127#include <linux/jhash.h>
128#include <linux/random.h> 128#include <linux/random.h>
129#include <trace/napi.h>
129 130
130#include "net-sysfs.h" 131#include "net-sysfs.h"
131 132
@@ -1688,7 +1689,17 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1688 goto gso; 1689 goto gso;
1689 } 1690 }
1690 1691
1692 /*
1693 * If device doesnt need skb->dst, release it right now while
1694 * its hot in this cpu cache
1695 */
1696 if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) {
1697 dst_release(skb->dst);
1698 skb->dst = NULL;
1699 }
1691 rc = ops->ndo_start_xmit(skb, dev); 1700 rc = ops->ndo_start_xmit(skb, dev);
1701 if (rc == 0)
1702 txq_trans_update(txq);
1692 /* 1703 /*
1693 * TODO: if skb_orphan() was called by 1704 * TODO: if skb_orphan() was called by
1694 * dev->hard_start_xmit() (for example, the unmodified 1705 * dev->hard_start_xmit() (for example, the unmodified
@@ -1718,6 +1729,7 @@ gso:
1718 skb->next = nskb; 1729 skb->next = nskb;
1719 return rc; 1730 return rc;
1720 } 1731 }
1732 txq_trans_update(txq);
1721 if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) 1733 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1722 return NETDEV_TX_BUSY; 1734 return NETDEV_TX_BUSY;
1723 } while (skb->next); 1735 } while (skb->next);
@@ -1735,8 +1747,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1735{ 1747{
1736 u32 hash; 1748 u32 hash;
1737 1749
1738 if (skb_rx_queue_recorded(skb)) 1750 if (skb_rx_queue_recorded(skb)) {
1739 return skb_get_rx_queue(skb) % dev->real_num_tx_queues; 1751 hash = skb_get_rx_queue(skb);
1752 while (unlikely (hash >= dev->real_num_tx_queues))
1753 hash -= dev->real_num_tx_queues;
1754 return hash;
1755 }
1740 1756
1741 if (skb->sk && skb->sk->sk_hash) 1757 if (skb->sk && skb->sk->sk_hash)
1742 hash = skb->sk->sk_hash; 1758 hash = skb->sk->sk_hash;
@@ -2374,26 +2390,6 @@ void napi_gro_flush(struct napi_struct *napi)
2374} 2390}
2375EXPORT_SYMBOL(napi_gro_flush); 2391EXPORT_SYMBOL(napi_gro_flush);
2376 2392
2377void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
2378{
2379 unsigned int offset = skb_gro_offset(skb);
2380
2381 hlen += offset;
2382 if (hlen <= skb_headlen(skb))
2383 return skb->data + offset;
2384
2385 if (unlikely(!skb_shinfo(skb)->nr_frags ||
2386 skb_shinfo(skb)->frags[0].size <=
2387 hlen - skb_headlen(skb) ||
2388 PageHighMem(skb_shinfo(skb)->frags[0].page)))
2389 return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
2390
2391 return page_address(skb_shinfo(skb)->frags[0].page) +
2392 skb_shinfo(skb)->frags[0].page_offset +
2393 offset - skb_headlen(skb);
2394}
2395EXPORT_SYMBOL(skb_gro_header);
2396
2397int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2393int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2398{ 2394{
2399 struct sk_buff **pp = NULL; 2395 struct sk_buff **pp = NULL;
@@ -2456,10 +2452,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2456 ret = GRO_HELD; 2452 ret = GRO_HELD;
2457 2453
2458pull: 2454pull:
2459 if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) { 2455 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2460 if (napi->gro_list == skb) 2456 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2461 napi->gro_list = skb->next; 2457
2462 ret = GRO_DROP; 2458 BUG_ON(skb->end - skb->tail < grow);
2459
2460 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2461
2462 skb->tail += grow;
2463 skb->data_len -= grow;
2464
2465 skb_shinfo(skb)->frags[0].page_offset += grow;
2466 skb_shinfo(skb)->frags[0].size -= grow;
2467
2468 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2469 put_page(skb_shinfo(skb)->frags[0].page);
2470 memmove(skb_shinfo(skb)->frags,
2471 skb_shinfo(skb)->frags + 1,
2472 --skb_shinfo(skb)->nr_frags);
2473 }
2463 } 2474 }
2464 2475
2465ok: 2476ok:
@@ -2509,6 +2520,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
2509} 2520}
2510EXPORT_SYMBOL(napi_skb_finish); 2521EXPORT_SYMBOL(napi_skb_finish);
2511 2522
2523void skb_gro_reset_offset(struct sk_buff *skb)
2524{
2525 NAPI_GRO_CB(skb)->data_offset = 0;
2526 NAPI_GRO_CB(skb)->frag0 = NULL;
2527 NAPI_GRO_CB(skb)->frag0_len = 0;
2528
2529 if (skb->mac_header == skb->tail &&
2530 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2531 NAPI_GRO_CB(skb)->frag0 =
2532 page_address(skb_shinfo(skb)->frags[0].page) +
2533 skb_shinfo(skb)->frags[0].page_offset;
2534 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2535 }
2536}
2537EXPORT_SYMBOL(skb_gro_reset_offset);
2538
2512int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2539int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2513{ 2540{
2514 skb_gro_reset_offset(skb); 2541 skb_gro_reset_offset(skb);
@@ -2526,16 +2553,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2526} 2553}
2527EXPORT_SYMBOL(napi_reuse_skb); 2554EXPORT_SYMBOL(napi_reuse_skb);
2528 2555
2529struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, 2556struct sk_buff *napi_get_frags(struct napi_struct *napi)
2530 struct napi_gro_fraginfo *info)
2531{ 2557{
2532 struct net_device *dev = napi->dev; 2558 struct net_device *dev = napi->dev;
2533 struct sk_buff *skb = napi->skb; 2559 struct sk_buff *skb = napi->skb;
2534 struct ethhdr *eth;
2535 skb_frag_t *frag;
2536 int i;
2537
2538 napi->skb = NULL;
2539 2560
2540 if (!skb) { 2561 if (!skb) {
2541 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN); 2562 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2543,47 +2564,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2543 goto out; 2564 goto out;
2544 2565
2545 skb_reserve(skb, NET_IP_ALIGN); 2566 skb_reserve(skb, NET_IP_ALIGN);
2546 }
2547 2567
2548 BUG_ON(info->nr_frags > MAX_SKB_FRAGS); 2568 napi->skb = skb;
2549 frag = info->frags;
2550
2551 for (i = 0; i < info->nr_frags; i++) {
2552 skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
2553 frag->size);
2554 frag++;
2555 } 2569 }
2556 skb_shinfo(skb)->nr_frags = info->nr_frags;
2557
2558 skb->data_len = info->len;
2559 skb->len += info->len;
2560 skb->truesize += info->len;
2561
2562 skb_reset_mac_header(skb);
2563 skb_gro_reset_offset(skb);
2564
2565 eth = skb_gro_header(skb, sizeof(*eth));
2566 if (!eth) {
2567 napi_reuse_skb(napi, skb);
2568 skb = NULL;
2569 goto out;
2570 }
2571
2572 skb_gro_pull(skb, sizeof(*eth));
2573
2574 /*
2575 * This works because the only protocols we care about don't require
2576 * special handling. We'll fix it up properly at the end.
2577 */
2578 skb->protocol = eth->h_proto;
2579
2580 skb->ip_summed = info->ip_summed;
2581 skb->csum = info->csum;
2582 2570
2583out: 2571out:
2584 return skb; 2572 return skb;
2585} 2573}
2586EXPORT_SYMBOL(napi_fraginfo_skb); 2574EXPORT_SYMBOL(napi_get_frags);
2587 2575
2588int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret) 2576int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2589{ 2577{
@@ -2613,9 +2601,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2613} 2601}
2614EXPORT_SYMBOL(napi_frags_finish); 2602EXPORT_SYMBOL(napi_frags_finish);
2615 2603
2616int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) 2604struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2617{ 2605{
2618 struct sk_buff *skb = napi_fraginfo_skb(napi, info); 2606 struct sk_buff *skb = napi->skb;
2607 struct ethhdr *eth;
2608 unsigned int hlen;
2609 unsigned int off;
2610
2611 napi->skb = NULL;
2612
2613 skb_reset_mac_header(skb);
2614 skb_gro_reset_offset(skb);
2615
2616 off = skb_gro_offset(skb);
2617 hlen = off + sizeof(*eth);
2618 eth = skb_gro_header_fast(skb, off);
2619 if (skb_gro_header_hard(skb, hlen)) {
2620 eth = skb_gro_header_slow(skb, hlen, off);
2621 if (unlikely(!eth)) {
2622 napi_reuse_skb(napi, skb);
2623 skb = NULL;
2624 goto out;
2625 }
2626 }
2627
2628 skb_gro_pull(skb, sizeof(*eth));
2629
2630 /*
2631 * This works because the only protocols we care about don't require
2632 * special handling. We'll fix it up properly at the end.
2633 */
2634 skb->protocol = eth->h_proto;
2635
2636out:
2637 return skb;
2638}
2639EXPORT_SYMBOL(napi_frags_skb);
2640
2641int napi_gro_frags(struct napi_struct *napi)
2642{
2643 struct sk_buff *skb = napi_frags_skb(napi);
2619 2644
2620 if (!skb) 2645 if (!skb)
2621 return NET_RX_DROP; 2646 return NET_RX_DROP;
@@ -2719,7 +2744,7 @@ void netif_napi_del(struct napi_struct *napi)
2719 struct sk_buff *skb, *next; 2744 struct sk_buff *skb, *next;
2720 2745
2721 list_del_init(&napi->dev_list); 2746 list_del_init(&napi->dev_list);
2722 kfree_skb(napi->skb); 2747 napi_free_frags(napi);
2723 2748
2724 for (skb = napi->gro_list; skb; skb = next) { 2749 for (skb = napi->gro_list; skb; skb = next) {
2725 next = skb->next; 2750 next = skb->next;
@@ -2773,8 +2798,10 @@ static void net_rx_action(struct softirq_action *h)
2773 * accidently calling ->poll() when NAPI is not scheduled. 2798 * accidently calling ->poll() when NAPI is not scheduled.
2774 */ 2799 */
2775 work = 0; 2800 work = 0;
2776 if (test_bit(NAPI_STATE_SCHED, &n->state)) 2801 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
2777 work = n->poll(n, weight); 2802 work = n->poll(n, weight);
2803 trace_napi_poll(n);
2804 }
2778 2805
2779 WARN_ON_ONCE(work > weight); 2806 WARN_ON_ONCE(work > weight);
2780 2807
@@ -3444,6 +3471,319 @@ void dev_set_rx_mode(struct net_device *dev)
3444 netif_addr_unlock_bh(dev); 3471 netif_addr_unlock_bh(dev);
3445} 3472}
3446 3473
3474/* hw addresses list handling functions */
3475
3476static int __hw_addr_add(struct list_head *list, int *delta,
3477 unsigned char *addr, int addr_len,
3478 unsigned char addr_type)
3479{
3480 struct netdev_hw_addr *ha;
3481 int alloc_size;
3482
3483 if (addr_len > MAX_ADDR_LEN)
3484 return -EINVAL;
3485
3486 list_for_each_entry(ha, list, list) {
3487 if (!memcmp(ha->addr, addr, addr_len) &&
3488 ha->type == addr_type) {
3489 ha->refcount++;
3490 return 0;
3491 }
3492 }
3493
3494
3495 alloc_size = sizeof(*ha);
3496 if (alloc_size < L1_CACHE_BYTES)
3497 alloc_size = L1_CACHE_BYTES;
3498 ha = kmalloc(alloc_size, GFP_ATOMIC);
3499 if (!ha)
3500 return -ENOMEM;
3501 memcpy(ha->addr, addr, addr_len);
3502 ha->type = addr_type;
3503 ha->refcount = 1;
3504 ha->synced = false;
3505 list_add_tail_rcu(&ha->list, list);
3506 if (delta)
3507 (*delta)++;
3508 return 0;
3509}
3510
3511static void ha_rcu_free(struct rcu_head *head)
3512{
3513 struct netdev_hw_addr *ha;
3514
3515 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3516 kfree(ha);
3517}
3518
3519static int __hw_addr_del(struct list_head *list, int *delta,
3520 unsigned char *addr, int addr_len,
3521 unsigned char addr_type)
3522{
3523 struct netdev_hw_addr *ha;
3524
3525 list_for_each_entry(ha, list, list) {
3526 if (!memcmp(ha->addr, addr, addr_len) &&
3527 (ha->type == addr_type || !addr_type)) {
3528 if (--ha->refcount)
3529 return 0;
3530 list_del_rcu(&ha->list);
3531 call_rcu(&ha->rcu_head, ha_rcu_free);
3532 if (delta)
3533 (*delta)--;
3534 return 0;
3535 }
3536 }
3537 return -ENOENT;
3538}
3539
3540static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
3541 struct list_head *from_list, int addr_len,
3542 unsigned char addr_type)
3543{
3544 int err;
3545 struct netdev_hw_addr *ha, *ha2;
3546 unsigned char type;
3547
3548 list_for_each_entry(ha, from_list, list) {
3549 type = addr_type ? addr_type : ha->type;
3550 err = __hw_addr_add(to_list, to_delta, ha->addr,
3551 addr_len, type);
3552 if (err)
3553 goto unroll;
3554 }
3555 return 0;
3556
3557unroll:
3558 list_for_each_entry(ha2, from_list, list) {
3559 if (ha2 == ha)
3560 break;
3561 type = addr_type ? addr_type : ha2->type;
3562 __hw_addr_del(to_list, to_delta, ha2->addr,
3563 addr_len, type);
3564 }
3565 return err;
3566}
3567
3568static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
3569 struct list_head *from_list, int addr_len,
3570 unsigned char addr_type)
3571{
3572 struct netdev_hw_addr *ha;
3573 unsigned char type;
3574
3575 list_for_each_entry(ha, from_list, list) {
3576 type = addr_type ? addr_type : ha->type;
3577 __hw_addr_del(to_list, to_delta, ha->addr,
3578 addr_len, addr_type);
3579 }
3580}
3581
3582static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
3583 struct list_head *from_list, int *from_delta,
3584 int addr_len)
3585{
3586 int err = 0;
3587 struct netdev_hw_addr *ha, *tmp;
3588
3589 list_for_each_entry_safe(ha, tmp, from_list, list) {
3590 if (!ha->synced) {
3591 err = __hw_addr_add(to_list, to_delta, ha->addr,
3592 addr_len, ha->type);
3593 if (err)
3594 break;
3595 ha->synced = true;
3596 ha->refcount++;
3597 } else if (ha->refcount == 1) {
3598 __hw_addr_del(to_list, to_delta, ha->addr,
3599 addr_len, ha->type);
3600 __hw_addr_del(from_list, from_delta, ha->addr,
3601 addr_len, ha->type);
3602 }
3603 }
3604 return err;
3605}
3606
3607static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
3608 struct list_head *from_list, int *from_delta,
3609 int addr_len)
3610{
3611 struct netdev_hw_addr *ha, *tmp;
3612
3613 list_for_each_entry_safe(ha, tmp, from_list, list) {
3614 if (ha->synced) {
3615 __hw_addr_del(to_list, to_delta, ha->addr,
3616 addr_len, ha->type);
3617 ha->synced = false;
3618 __hw_addr_del(from_list, from_delta, ha->addr,
3619 addr_len, ha->type);
3620 }
3621 }
3622}
3623
3624
3625static void __hw_addr_flush(struct list_head *list)
3626{
3627 struct netdev_hw_addr *ha, *tmp;
3628
3629 list_for_each_entry_safe(ha, tmp, list, list) {
3630 list_del_rcu(&ha->list);
3631 call_rcu(&ha->rcu_head, ha_rcu_free);
3632 }
3633}
3634
3635/* Device addresses handling functions */
3636
3637static void dev_addr_flush(struct net_device *dev)
3638{
3639 /* rtnl_mutex must be held here */
3640
3641 __hw_addr_flush(&dev->dev_addr_list);
3642 dev->dev_addr = NULL;
3643}
3644
3645static int dev_addr_init(struct net_device *dev)
3646{
3647 unsigned char addr[MAX_ADDR_LEN];
3648 struct netdev_hw_addr *ha;
3649 int err;
3650
3651 /* rtnl_mutex must be held here */
3652
3653 INIT_LIST_HEAD(&dev->dev_addr_list);
3654 memset(addr, 0, sizeof(*addr));
3655 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(*addr),
3656 NETDEV_HW_ADDR_T_LAN);
3657 if (!err) {
3658 /*
3659 * Get the first (previously created) address from the list
3660 * and set dev_addr pointer to this location.
3661 */
3662 ha = list_first_entry(&dev->dev_addr_list,
3663 struct netdev_hw_addr, list);
3664 dev->dev_addr = ha->addr;
3665 }
3666 return err;
3667}
3668
3669/**
3670 * dev_addr_add - Add a device address
3671 * @dev: device
3672 * @addr: address to add
3673 * @addr_type: address type
3674 *
3675 * Add a device address to the device or increase the reference count if
3676 * it already exists.
3677 *
3678 * The caller must hold the rtnl_mutex.
3679 */
3680int dev_addr_add(struct net_device *dev, unsigned char *addr,
3681 unsigned char addr_type)
3682{
3683 int err;
3684
3685 ASSERT_RTNL();
3686
3687 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3688 addr_type);
3689 if (!err)
3690 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3691 return err;
3692}
3693EXPORT_SYMBOL(dev_addr_add);
3694
3695/**
3696 * dev_addr_del - Release a device address.
3697 * @dev: device
3698 * @addr: address to delete
3699 * @addr_type: address type
3700 *
3701 * Release reference to a device address and remove it from the device
3702 * if the reference count drops to zero.
3703 *
3704 * The caller must hold the rtnl_mutex.
3705 */
3706int dev_addr_del(struct net_device *dev, unsigned char *addr,
3707 unsigned char addr_type)
3708{
3709 int err;
3710 struct netdev_hw_addr *ha;
3711
3712 ASSERT_RTNL();
3713
3714 /*
3715 * We can not remove the first address from the list because
3716 * dev->dev_addr points to that.
3717 */
3718 ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
3719 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3720 return -ENOENT;
3721
3722 err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3723 addr_type);
3724 if (!err)
3725 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3726 return err;
3727}
3728EXPORT_SYMBOL(dev_addr_del);
3729
3730/**
3731 * dev_addr_add_multiple - Add device addresses from another device
3732 * @to_dev: device to which addresses will be added
3733 * @from_dev: device from which addresses will be added
3734 * @addr_type: address type - 0 means type will be used from from_dev
3735 *
3736 * Add device addresses of the one device to another.
3737 **
3738 * The caller must hold the rtnl_mutex.
3739 */
3740int dev_addr_add_multiple(struct net_device *to_dev,
3741 struct net_device *from_dev,
3742 unsigned char addr_type)
3743{
3744 int err;
3745
3746 ASSERT_RTNL();
3747
3748 if (from_dev->addr_len != to_dev->addr_len)
3749 return -EINVAL;
3750 err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
3751 &from_dev->dev_addr_list,
3752 to_dev->addr_len, addr_type);
3753 if (!err)
3754 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3755 return err;
3756}
3757EXPORT_SYMBOL(dev_addr_add_multiple);
3758
3759/**
3760 * dev_addr_del_multiple - Delete device addresses by another device
3761 * @to_dev: device where the addresses will be deleted
3762 * @from_dev: device by which addresses the addresses will be deleted
3763 * @addr_type: address type - 0 means type will used from from_dev
3764 *
3765 * Deletes addresses in to device by the list of addresses in from device.
3766 *
3767 * The caller must hold the rtnl_mutex.
3768 */
3769int dev_addr_del_multiple(struct net_device *to_dev,
3770 struct net_device *from_dev,
3771 unsigned char addr_type)
3772{
3773 ASSERT_RTNL();
3774
3775 if (from_dev->addr_len != to_dev->addr_len)
3776 return -EINVAL;
3777 __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
3778 &from_dev->dev_addr_list,
3779 to_dev->addr_len, addr_type);
3780 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3781 return 0;
3782}
3783EXPORT_SYMBOL(dev_addr_del_multiple);
3784
3785/* unicast and multicast addresses handling functions */
3786
3447int __dev_addr_delete(struct dev_addr_list **list, int *count, 3787int __dev_addr_delete(struct dev_addr_list **list, int *count,
3448 void *addr, int alen, int glbl) 3788 void *addr, int alen, int glbl)
3449{ 3789{
@@ -3506,24 +3846,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
3506 * dev_unicast_delete - Release secondary unicast address. 3846 * dev_unicast_delete - Release secondary unicast address.
3507 * @dev: device 3847 * @dev: device
3508 * @addr: address to delete 3848 * @addr: address to delete
3509 * @alen: length of @addr
3510 * 3849 *
3511 * Release reference to a secondary unicast address and remove it 3850 * Release reference to a secondary unicast address and remove it
3512 * from the device if the reference count drops to zero. 3851 * from the device if the reference count drops to zero.
3513 * 3852 *
3514 * The caller must hold the rtnl_mutex. 3853 * The caller must hold the rtnl_mutex.
3515 */ 3854 */
3516int dev_unicast_delete(struct net_device *dev, void *addr, int alen) 3855int dev_unicast_delete(struct net_device *dev, void *addr)
3517{ 3856{
3518 int err; 3857 int err;
3519 3858
3520 ASSERT_RTNL(); 3859 ASSERT_RTNL();
3521 3860
3522 netif_addr_lock_bh(dev); 3861 err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
3523 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); 3862 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
3524 if (!err) 3863 if (!err)
3525 __dev_set_rx_mode(dev); 3864 __dev_set_rx_mode(dev);
3526 netif_addr_unlock_bh(dev);
3527 return err; 3865 return err;
3528} 3866}
3529EXPORT_SYMBOL(dev_unicast_delete); 3867EXPORT_SYMBOL(dev_unicast_delete);
@@ -3532,24 +3870,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
3532 * dev_unicast_add - add a secondary unicast address 3870 * dev_unicast_add - add a secondary unicast address
3533 * @dev: device 3871 * @dev: device
3534 * @addr: address to add 3872 * @addr: address to add
3535 * @alen: length of @addr
3536 * 3873 *
3537 * Add a secondary unicast address to the device or increase 3874 * Add a secondary unicast address to the device or increase
3538 * the reference count if it already exists. 3875 * the reference count if it already exists.
3539 * 3876 *
3540 * The caller must hold the rtnl_mutex. 3877 * The caller must hold the rtnl_mutex.
3541 */ 3878 */
3542int dev_unicast_add(struct net_device *dev, void *addr, int alen) 3879int dev_unicast_add(struct net_device *dev, void *addr)
3543{ 3880{
3544 int err; 3881 int err;
3545 3882
3546 ASSERT_RTNL(); 3883 ASSERT_RTNL();
3547 3884
3548 netif_addr_lock_bh(dev); 3885 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3549 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); 3886 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
3550 if (!err) 3887 if (!err)
3551 __dev_set_rx_mode(dev); 3888 __dev_set_rx_mode(dev);
3552 netif_addr_unlock_bh(dev);
3553 return err; 3889 return err;
3554} 3890}
3555EXPORT_SYMBOL(dev_unicast_add); 3891EXPORT_SYMBOL(dev_unicast_add);
@@ -3606,8 +3942,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3606 * @from: source device 3942 * @from: source device
3607 * 3943 *
3608 * Add newly added addresses to the destination device and release 3944 * Add newly added addresses to the destination device and release
3609 * addresses that have no users left. The source device must be 3945 * addresses that have no users left.
3610 * locked by netif_tx_lock_bh.
3611 * 3946 *
3612 * This function is intended to be called from the dev->set_rx_mode 3947 * This function is intended to be called from the dev->set_rx_mode
3613 * function of layered software devices. 3948 * function of layered software devices.
@@ -3616,12 +3951,15 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
3616{ 3951{
3617 int err = 0; 3952 int err = 0;
3618 3953
3619 netif_addr_lock_bh(to); 3954 ASSERT_RTNL();
3620 err = __dev_addr_sync(&to->uc_list, &to->uc_count, 3955
3621 &from->uc_list, &from->uc_count); 3956 if (to->addr_len != from->addr_len)
3957 return -EINVAL;
3958
3959 err = __hw_addr_sync(&to->uc_list, &to->uc_count,
3960 &from->uc_list, &from->uc_count, to->addr_len);
3622 if (!err) 3961 if (!err)
3623 __dev_set_rx_mode(to); 3962 __dev_set_rx_mode(to);
3624 netif_addr_unlock_bh(to);
3625 return err; 3963 return err;
3626} 3964}
3627EXPORT_SYMBOL(dev_unicast_sync); 3965EXPORT_SYMBOL(dev_unicast_sync);
@@ -3637,18 +3975,33 @@ EXPORT_SYMBOL(dev_unicast_sync);
3637 */ 3975 */
3638void dev_unicast_unsync(struct net_device *to, struct net_device *from) 3976void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3639{ 3977{
3640 netif_addr_lock_bh(from); 3978 ASSERT_RTNL();
3641 netif_addr_lock(to);
3642 3979
3643 __dev_addr_unsync(&to->uc_list, &to->uc_count, 3980 if (to->addr_len != from->addr_len)
3644 &from->uc_list, &from->uc_count); 3981 return;
3645 __dev_set_rx_mode(to);
3646 3982
3647 netif_addr_unlock(to); 3983 __hw_addr_unsync(&to->uc_list, &to->uc_count,
3648 netif_addr_unlock_bh(from); 3984 &from->uc_list, &from->uc_count, to->addr_len);
3985 __dev_set_rx_mode(to);
3649} 3986}
3650EXPORT_SYMBOL(dev_unicast_unsync); 3987EXPORT_SYMBOL(dev_unicast_unsync);
3651 3988
3989static void dev_unicast_flush(struct net_device *dev)
3990{
3991 /* rtnl_mutex must be held here */
3992
3993 __hw_addr_flush(&dev->uc_list);
3994 dev->uc_count = 0;
3995}
3996
3997static void dev_unicast_init(struct net_device *dev)
3998{
3999 /* rtnl_mutex must be held here */
4000
4001 INIT_LIST_HEAD(&dev->uc_list);
4002}
4003
4004
3652static void __dev_addr_discard(struct dev_addr_list **list) 4005static void __dev_addr_discard(struct dev_addr_list **list)
3653{ 4006{
3654 struct dev_addr_list *tmp; 4007 struct dev_addr_list *tmp;
@@ -3667,9 +4020,6 @@ static void dev_addr_discard(struct net_device *dev)
3667{ 4020{
3668 netif_addr_lock_bh(dev); 4021 netif_addr_lock_bh(dev);
3669 4022
3670 __dev_addr_discard(&dev->uc_list);
3671 dev->uc_count = 0;
3672
3673 __dev_addr_discard(&dev->mc_list); 4023 __dev_addr_discard(&dev->mc_list);
3674 dev->mc_count = 0; 4024 dev->mc_count = 0;
3675 4025
@@ -4262,6 +4612,7 @@ static void rollback_registered(struct net_device *dev)
4262 /* 4612 /*
4263 * Flush the unicast and multicast chains 4613 * Flush the unicast and multicast chains
4264 */ 4614 */
4615 dev_unicast_flush(dev);
4265 dev_addr_discard(dev); 4616 dev_addr_discard(dev);
4266 4617
4267 if (dev->netdev_ops->ndo_uninit) 4618 if (dev->netdev_ops->ndo_uninit)
@@ -4333,39 +4684,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
4333} 4684}
4334EXPORT_SYMBOL(netdev_fix_features); 4685EXPORT_SYMBOL(netdev_fix_features);
4335 4686
4336/* Some devices need to (re-)set their netdev_ops inside
4337 * ->init() or similar. If that happens, we have to setup
4338 * the compat pointers again.
4339 */
4340void netdev_resync_ops(struct net_device *dev)
4341{
4342#ifdef CONFIG_COMPAT_NET_DEV_OPS
4343 const struct net_device_ops *ops = dev->netdev_ops;
4344
4345 dev->init = ops->ndo_init;
4346 dev->uninit = ops->ndo_uninit;
4347 dev->open = ops->ndo_open;
4348 dev->change_rx_flags = ops->ndo_change_rx_flags;
4349 dev->set_rx_mode = ops->ndo_set_rx_mode;
4350 dev->set_multicast_list = ops->ndo_set_multicast_list;
4351 dev->set_mac_address = ops->ndo_set_mac_address;
4352 dev->validate_addr = ops->ndo_validate_addr;
4353 dev->do_ioctl = ops->ndo_do_ioctl;
4354 dev->set_config = ops->ndo_set_config;
4355 dev->change_mtu = ops->ndo_change_mtu;
4356 dev->neigh_setup = ops->ndo_neigh_setup;
4357 dev->tx_timeout = ops->ndo_tx_timeout;
4358 dev->get_stats = ops->ndo_get_stats;
4359 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4360 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4361 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4362#ifdef CONFIG_NET_POLL_CONTROLLER
4363 dev->poll_controller = ops->ndo_poll_controller;
4364#endif
4365#endif
4366}
4367EXPORT_SYMBOL(netdev_resync_ops);
4368
4369/** 4687/**
4370 * register_netdevice - register a network device 4688 * register_netdevice - register a network device
4371 * @dev: device to register 4689 * @dev: device to register
@@ -4405,23 +4723,6 @@ int register_netdevice(struct net_device *dev)
4405 4723
4406 dev->iflink = -1; 4724 dev->iflink = -1;
4407 4725
4408#ifdef CONFIG_COMPAT_NET_DEV_OPS
4409 /* Netdevice_ops API compatibility support.
4410 * This is temporary until all network devices are converted.
4411 */
4412 if (dev->netdev_ops) {
4413 netdev_resync_ops(dev);
4414 } else {
4415 char drivername[64];
4416 pr_info("%s (%s): not using net_device_ops yet\n",
4417 dev->name, netdev_drivername(dev, drivername, 64));
4418
4419 /* This works only because net_device_ops and the
4420 compatibility structure are the same. */
4421 dev->netdev_ops = (void *) &(dev->init);
4422 }
4423#endif
4424
4425 /* Init, if this function is available */ 4726 /* Init, if this function is available */
4426 if (dev->netdev_ops->ndo_init) { 4727 if (dev->netdev_ops->ndo_init) {
4427 ret = dev->netdev_ops->ndo_init(dev); 4728 ret = dev->netdev_ops->ndo_init(dev);
@@ -4707,13 +5008,30 @@ void netdev_run_todo(void)
4707 * the internal statistics structure is used. 5008 * the internal statistics structure is used.
4708 */ 5009 */
4709const struct net_device_stats *dev_get_stats(struct net_device *dev) 5010const struct net_device_stats *dev_get_stats(struct net_device *dev)
4710 { 5011{
4711 const struct net_device_ops *ops = dev->netdev_ops; 5012 const struct net_device_ops *ops = dev->netdev_ops;
4712 5013
4713 if (ops->ndo_get_stats) 5014 if (ops->ndo_get_stats)
4714 return ops->ndo_get_stats(dev); 5015 return ops->ndo_get_stats(dev);
4715 else 5016 else {
4716 return &dev->stats; 5017 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5018 struct net_device_stats *stats = &dev->stats;
5019 unsigned int i;
5020 struct netdev_queue *txq;
5021
5022 for (i = 0; i < dev->num_tx_queues; i++) {
5023 txq = netdev_get_tx_queue(dev, i);
5024 tx_bytes += txq->tx_bytes;
5025 tx_packets += txq->tx_packets;
5026 tx_dropped += txq->tx_dropped;
5027 }
5028 if (tx_bytes || tx_packets || tx_dropped) {
5029 stats->tx_bytes = tx_bytes;
5030 stats->tx_packets = tx_packets;
5031 stats->tx_dropped = tx_dropped;
5032 }
5033 return stats;
5034 }
4717} 5035}
4718EXPORT_SYMBOL(dev_get_stats); 5036EXPORT_SYMBOL(dev_get_stats);
4719 5037
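
When a driver provides no ndo_get_stats hook, the dev_get_stats() fallback above now folds the per-queue tx_bytes/tx_packets/tx_dropped counters kept in each struct netdev_queue into the device-wide net_device_stats. A minimal userspace sketch of that aggregation, with invented struct names standing in for the kernel ones:

    #include <stdio.h>

    struct txq_stats { unsigned long tx_bytes, tx_packets, tx_dropped; };
    struct dev_stats { unsigned long tx_bytes, tx_packets, tx_dropped; };

    /* fold per-queue counters into the device-wide stats, as the
     * dev_get_stats() fallback above does for multiqueue devices */
    static void fold_tx_stats(struct dev_stats *stats,
                              const struct txq_stats *txq, unsigned int nqueues)
    {
        unsigned long bytes = 0, packets = 0, dropped = 0;
        unsigned int i;

        for (i = 0; i < nqueues; i++) {
            bytes   += txq[i].tx_bytes;
            packets += txq[i].tx_packets;
            dropped += txq[i].tx_dropped;
        }
        /* only overwrite if some queue actually counted something */
        if (bytes || packets || dropped) {
            stats->tx_bytes = bytes;
            stats->tx_packets = packets;
            stats->tx_dropped = dropped;
        }
    }

    int main(void)
    {
        struct txq_stats q[2] = { { 1500, 10, 0 }, { 3000, 20, 1 } };
        struct dev_stats s = { 0, 0, 0 };

        fold_tx_stats(&s, q, 2);
        printf("bytes=%lu packets=%lu dropped=%lu\n",
               s.tx_bytes, s.tx_packets, s.tx_dropped);
        return 0;
    }
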
@@ -4748,18 +5066,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4748 struct netdev_queue *tx; 5066 struct netdev_queue *tx;
4749 struct net_device *dev; 5067 struct net_device *dev;
4750 size_t alloc_size; 5068 size_t alloc_size;
4751 void *p; 5069 struct net_device *p;
4752 5070
4753 BUG_ON(strlen(name) >= sizeof(dev->name)); 5071 BUG_ON(strlen(name) >= sizeof(dev->name));
4754 5072
4755 alloc_size = sizeof(struct net_device); 5073 alloc_size = sizeof(struct net_device);
4756 if (sizeof_priv) { 5074 if (sizeof_priv) {
4757 /* ensure 32-byte alignment of private area */ 5075 /* ensure 32-byte alignment of private area */
4758 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 5076 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
4759 alloc_size += sizeof_priv; 5077 alloc_size += sizeof_priv;
4760 } 5078 }
4761 /* ensure 32-byte alignment of whole construct */ 5079 /* ensure 32-byte alignment of whole construct */
4762 alloc_size += NETDEV_ALIGN_CONST; 5080 alloc_size += NETDEV_ALIGN - 1;
4763 5081
4764 p = kzalloc(alloc_size, GFP_KERNEL); 5082 p = kzalloc(alloc_size, GFP_KERNEL);
4765 if (!p) { 5083 if (!p) {
@@ -4771,13 +5089,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4771 if (!tx) { 5089 if (!tx) {
4772 printk(KERN_ERR "alloc_netdev: Unable to allocate " 5090 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4773 "tx qdiscs.\n"); 5091 "tx qdiscs.\n");
4774 kfree(p); 5092 goto free_p;
4775 return NULL;
4776 } 5093 }
4777 5094
4778 dev = (struct net_device *) 5095 dev = PTR_ALIGN(p, NETDEV_ALIGN);
4779 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4780 dev->padded = (char *)dev - (char *)p; 5096 dev->padded = (char *)dev - (char *)p;
5097
5098 if (dev_addr_init(dev))
5099 goto free_tx;
5100
5101 dev_unicast_init(dev);
5102
4781 dev_net_set(dev, &init_net); 5103 dev_net_set(dev, &init_net);
4782 5104
4783 dev->_tx = tx; 5105 dev->_tx = tx;
@@ -4789,9 +5111,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4789 netdev_init_queues(dev); 5111 netdev_init_queues(dev);
4790 5112
4791 INIT_LIST_HEAD(&dev->napi_list); 5113 INIT_LIST_HEAD(&dev->napi_list);
5114 dev->priv_flags = IFF_XMIT_DST_RELEASE;
4792 setup(dev); 5115 setup(dev);
4793 strcpy(dev->name, name); 5116 strcpy(dev->name, name);
4794 return dev; 5117 return dev;
5118
5119free_tx:
5120 kfree(tx);
5121
5122free_p:
5123 kfree(p);
5124 return NULL;
4795} 5125}
4796EXPORT_SYMBOL(alloc_netdev_mq); 5126EXPORT_SYMBOL(alloc_netdev_mq);
4797 5127
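
alloc_netdev_mq() now over-allocates by NETDEV_ALIGN - 1 bytes and uses PTR_ALIGN() to round the struct net_device pointer up to the next NETDEV_ALIGN boundary, recording the offset in dev->padded so the original kzalloc() pointer can be recovered when the device is freed. A hedged userspace sketch of that round-up-and-remember pattern, with ALIGN/PTR_ALIGN reimplemented locally for the example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define NETDEV_ALIGN 32
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
    #define PTR_ALIGN(p, a) ((void *)ALIGN((uintptr_t)(p), (a)))

    struct fake_dev {
        unsigned short padded;      /* offset back to the raw allocation */
        char priv[64];
    };

    int main(void)
    {
        /* over-allocate so the aligned pointer still fits */
        size_t alloc_size = sizeof(struct fake_dev) + NETDEV_ALIGN - 1;
        void *p = calloc(1, alloc_size);
        struct fake_dev *dev;

        if (!p)
            return 1;

        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;

        printf("raw=%p aligned=%p padded=%u\n",
               p, (void *)dev, (unsigned)dev->padded);

        /* what free_netdev() effectively does: step back before freeing */
        free((char *)dev - dev->padded);
        return 0;
    }
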
@@ -4811,6 +5141,9 @@ void free_netdev(struct net_device *dev)
4811 5141
4812 kfree(dev->_tx); 5142 kfree(dev->_tx);
4813 5143
5144 /* Flush device addresses */
5145 dev_addr_flush(dev);
5146
4814 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5147 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
4815 netif_napi_del(p); 5148 netif_napi_del(p);
4816 5149
@@ -4970,6 +5303,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
4970 /* 5303 /*
4971 * Flush the unicast and multicast chains 5304 * Flush the unicast and multicast chains
4972 */ 5305 */
5306 dev_unicast_flush(dev);
4973 dev_addr_discard(dev); 5307 dev_addr_discard(dev);
4974 5308
4975 netdev_unregister_kobject(dev); 5309 netdev_unregister_kobject(dev);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9fd0dc3cca99..a6c2ac2828fb 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -22,8 +22,10 @@
22#include <linux/timer.h> 22#include <linux/timer.h>
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <net/genetlink.h> 24#include <net/genetlink.h>
25#include <net/netevent.h>
25 26
26#include <trace/skb.h> 27#include <trace/skb.h>
28#include <trace/napi.h>
27 29
28#include <asm/unaligned.h> 30#include <asm/unaligned.h>
29 31
@@ -38,7 +40,8 @@ static void send_dm_alert(struct work_struct *unused);
38 * and the work handle that will send up 40 * and the work handle that will send up
39 * netlink alerts 41 * netlink alerts
40 */ 42 */
41struct sock *dm_sock; 43static int trace_state = TRACE_OFF;
44static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED;
42 45
43struct per_cpu_dm_data { 46struct per_cpu_dm_data {
44 struct work_struct dm_alert_work; 47 struct work_struct dm_alert_work;
@@ -47,11 +50,18 @@ struct per_cpu_dm_data {
47 struct timer_list send_timer; 50 struct timer_list send_timer;
48}; 51};
49 52
53struct dm_hw_stat_delta {
54 struct net_device *dev;
55 struct list_head list;
56 struct rcu_head rcu;
57 unsigned long last_drop_val;
58};
59
50static struct genl_family net_drop_monitor_family = { 60static struct genl_family net_drop_monitor_family = {
51 .id = GENL_ID_GENERATE, 61 .id = GENL_ID_GENERATE,
52 .hdrsize = 0, 62 .hdrsize = 0,
53 .name = "NET_DM", 63 .name = "NET_DM",
54 .version = 1, 64 .version = 2,
55 .maxattr = NET_DM_CMD_MAX, 65 .maxattr = NET_DM_CMD_MAX,
56}; 66};
57 67
@@ -59,19 +69,24 @@ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
59 69
60static int dm_hit_limit = 64; 70static int dm_hit_limit = 64;
61static int dm_delay = 1; 71static int dm_delay = 1;
62 72static unsigned long dm_hw_check_delta = 2*HZ;
73static LIST_HEAD(hw_stats_list);
63 74
64static void reset_per_cpu_data(struct per_cpu_dm_data *data) 75static void reset_per_cpu_data(struct per_cpu_dm_data *data)
65{ 76{
66 size_t al; 77 size_t al;
67 struct net_dm_alert_msg *msg; 78 struct net_dm_alert_msg *msg;
79 struct nlattr *nla;
68 80
69 al = sizeof(struct net_dm_alert_msg); 81 al = sizeof(struct net_dm_alert_msg);
70 al += dm_hit_limit * sizeof(struct net_dm_drop_point); 82 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
83 al += sizeof(struct nlattr);
84
71 data->skb = genlmsg_new(al, GFP_KERNEL); 85 data->skb = genlmsg_new(al, GFP_KERNEL);
72 genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, 86 genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
73 0, NET_DM_CMD_ALERT); 87 0, NET_DM_CMD_ALERT);
74 msg = __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_alert_msg)); 88 nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
89 msg = nla_data(nla);
75 memset(msg, 0, al); 90 memset(msg, 0, al);
76 atomic_set(&data->dm_hit_count, dm_hit_limit); 91 atomic_set(&data->dm_hit_count, dm_hit_limit);
77} 92}
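
reset_per_cpu_data() above sizes the preallocated netlink skb for the worst case, the summary struct plus dm_hit_limit drop points plus one attribute header, and wraps the payload in an NLA_UNSPEC attribute via nla_reserve(); trace_drop_common() later grows that attribute's nla_len by one aligned drop point per new location. The arithmetic-only sketch below mirrors that accounting with invented fake_* structs; the sizes are illustrative, not the real net_dm_* layouts:

    #include <stdio.h>

    #define NLA_ALIGNTO  4
    #define NLA_ALIGN(l) (((l) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

    struct fake_nlattr { unsigned short nla_len, nla_type; };
    struct fake_point  { unsigned char pc[8]; unsigned int count; };
    struct fake_msg    { unsigned int entries; /* drop points follow */ };

    int main(void)
    {
        unsigned int hit_limit = 64;

        /* worst-case buffer: summary struct, hit_limit drop points,
         * plus one attribute header wrapping the whole payload */
        size_t al = sizeof(struct fake_msg)
                  + hit_limit * sizeof(struct fake_point)
                  + sizeof(struct fake_nlattr);

        /* the attribute initially covers only the summary struct ... */
        unsigned int nla_len = sizeof(struct fake_nlattr)
                             + sizeof(struct fake_msg);

        /* ... and grows by one aligned drop point per new location */
        nla_len += NLA_ALIGN(sizeof(struct fake_point));

        printf("prealloc %zu bytes, attr length after first point: %u\n",
               al, nla_len);
        return 0;
    }
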
@@ -111,10 +126,11 @@ static void sched_send_work(unsigned long unused)
111 schedule_work(&data->dm_alert_work); 126 schedule_work(&data->dm_alert_work);
112} 127}
113 128
114static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) 129static void trace_drop_common(struct sk_buff *skb, void *location)
115{ 130{
116 struct net_dm_alert_msg *msg; 131 struct net_dm_alert_msg *msg;
117 struct nlmsghdr *nlh; 132 struct nlmsghdr *nlh;
133 struct nlattr *nla;
118 int i; 134 int i;
119 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 135 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
120 136
@@ -127,7 +143,8 @@ static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
127 } 143 }
128 144
129 nlh = (struct nlmsghdr *)data->skb->data; 145 nlh = (struct nlmsghdr *)data->skb->data;
130 msg = genlmsg_data(nlmsg_data(nlh)); 146 nla = genlmsg_data(nlmsg_data(nlh));
147 msg = nla_data(nla);
131 for (i = 0; i < msg->entries; i++) { 148 for (i = 0; i < msg->entries; i++) {
132 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 149 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
133 msg->points[i].count++; 150 msg->points[i].count++;
@@ -139,6 +156,7 @@ static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
139 * We need to create a new entry 156 * We need to create a new entry
140 */ 157 */
141 __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); 158 __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
159 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
142 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); 160 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
143 msg->points[msg->entries].count = 1; 161 msg->points[msg->entries].count = 1;
144 msg->entries++; 162 msg->entries++;
@@ -152,24 +170,80 @@ out:
152 return; 170 return;
153} 171}
154 172
173static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
174{
175 trace_drop_common(skb, location);
176}
177
178static void trace_napi_poll_hit(struct napi_struct *napi)
179{
180 struct dm_hw_stat_delta *new_stat;
181
182 /*
183 * Ratelimit our check time to dm_hw_check_delta jiffies
184 */
185 if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta))
186 return;
187
188 rcu_read_lock();
189 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
190 if ((new_stat->dev == napi->dev) &&
191 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
192 trace_drop_common(NULL, NULL);
193 new_stat->last_drop_val = napi->dev->stats.rx_dropped;
194 break;
195 }
196 }
197 rcu_read_unlock();
198}
199
200
201static void free_dm_hw_stat(struct rcu_head *head)
202{
203 struct dm_hw_stat_delta *n;
204 n = container_of(head, struct dm_hw_stat_delta, rcu);
205 kfree(n);
206}
207
155static int set_all_monitor_traces(int state) 208static int set_all_monitor_traces(int state)
156{ 209{
157 int rc = 0; 210 int rc = 0;
211 struct dm_hw_stat_delta *new_stat = NULL;
212 struct dm_hw_stat_delta *temp;
213
214 spin_lock(&trace_state_lock);
158 215
159 switch (state) { 216 switch (state) {
160 case TRACE_ON: 217 case TRACE_ON:
161 rc |= register_trace_kfree_skb(trace_kfree_skb_hit); 218 rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
219 rc |= register_trace_napi_poll(trace_napi_poll_hit);
162 break; 220 break;
163 case TRACE_OFF: 221 case TRACE_OFF:
164 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); 222 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
223 rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
165 224
166 tracepoint_synchronize_unregister(); 225 tracepoint_synchronize_unregister();
226
227 /*
228 * Clean the device list
229 */
230 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
231 if (new_stat->dev == NULL) {
232 list_del_rcu(&new_stat->list);
233 call_rcu(&new_stat->rcu, free_dm_hw_stat);
234 }
235 }
167 break; 236 break;
168 default: 237 default:
169 rc = 1; 238 rc = 1;
170 break; 239 break;
171 } 240 }
172 241
242 if (!rc)
243 trace_state = state;
244
245 spin_unlock(&trace_state_lock);
246
173 if (rc) 247 if (rc)
174 return -EINPROGRESS; 248 return -EINPROGRESS;
175 return rc; 249 return rc;
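
trace_napi_poll_hit() above rate-limits the hardware-drop check with time_after(jiffies, dev->last_rx + dm_hw_check_delta), so the RCU list is only rescanned every dm_hw_check_delta jiffies. time_after() is written so the comparison stays correct when the tick counter wraps around. A small standalone sketch of the idiom, with jiffies simulated by a plain counter:

    #include <stdio.h>

    /* kernel-style wrap-safe comparison: true if a is after b */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long jiffies = (unsigned long)-5;   /* about to wrap */
        unsigned long last_check = jiffies;
        unsigned long delta = 10;
        int i;

        for (i = 0; i < 20; i++, jiffies++) {
            if (!time_after(jiffies, last_check + delta))
                continue;                    /* rate limited */
            printf("check fired at offset %d (jiffies wrapped: %s)\n",
                   i, jiffies < delta ? "yes" : "no");
            last_check = jiffies;
        }
        return 0;
    }
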
@@ -197,6 +271,44 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
197 return -ENOTSUPP; 271 return -ENOTSUPP;
198} 272}
199 273
274static int dropmon_net_event(struct notifier_block *ev_block,
275 unsigned long event, void *ptr)
276{
277 struct net_device *dev = ptr;
278 struct dm_hw_stat_delta *new_stat = NULL;
279 struct dm_hw_stat_delta *tmp;
280
281 switch (event) {
282 case NETDEV_REGISTER:
283 new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);
284
285 if (!new_stat)
286 goto out;
287
288 new_stat->dev = dev;
289 INIT_RCU_HEAD(&new_stat->rcu);
290 spin_lock(&trace_state_lock);
291 list_add_rcu(&new_stat->list, &hw_stats_list);
292 spin_unlock(&trace_state_lock);
293 break;
294 case NETDEV_UNREGISTER:
295 spin_lock(&trace_state_lock);
296 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
297 if (new_stat->dev == dev) {
298 new_stat->dev = NULL;
299 if (trace_state == TRACE_OFF) {
300 list_del_rcu(&new_stat->list);
301 call_rcu(&new_stat->rcu, free_dm_hw_stat);
302 break;
303 }
304 }
305 }
306 spin_unlock(&trace_state_lock);
307 break;
308 }
309out:
310 return NOTIFY_DONE;
311}
200 312
201static struct genl_ops dropmon_ops[] = { 313static struct genl_ops dropmon_ops[] = {
202 { 314 {
@@ -213,6 +325,10 @@ static struct genl_ops dropmon_ops[] = {
213 }, 325 },
214}; 326};
215 327
328static struct notifier_block dropmon_net_notifier = {
329 .notifier_call = dropmon_net_event
330};
331
216static int __init init_net_drop_monitor(void) 332static int __init init_net_drop_monitor(void)
217{ 333{
218 int cpu; 334 int cpu;
@@ -236,12 +352,18 @@ static int __init init_net_drop_monitor(void)
236 ret = genl_register_ops(&net_drop_monitor_family, 352 ret = genl_register_ops(&net_drop_monitor_family,
237 &dropmon_ops[i]); 353 &dropmon_ops[i]);
238 if (ret) { 354 if (ret) {
239 printk(KERN_CRIT "failed to register operation %d\n", 355 printk(KERN_CRIT "Failed to register operation %d\n",
240 dropmon_ops[i].cmd); 356 dropmon_ops[i].cmd);
241 goto out_unreg; 357 goto out_unreg;
242 } 358 }
243 } 359 }
244 360
361 rc = register_netdevice_notifier(&dropmon_net_notifier);
362 if (rc < 0) {
363 printk(KERN_CRIT "Failed to register netdevice notifier\n");
364 goto out_unreg;
365 }
366
245 rc = 0; 367 rc = 0;
246 368
247 for_each_present_cpu(cpu) { 369 for_each_present_cpu(cpu) {
@@ -252,6 +374,7 @@ static int __init init_net_drop_monitor(void)
252 data->send_timer.data = cpu; 374 data->send_timer.data = cpu;
253 data->send_timer.function = sched_send_work; 375 data->send_timer.function = sched_send_work;
254 } 376 }
377
255 goto out; 378 goto out;
256 379
257out_unreg: 380out_unreg:
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 98691e1466b8..bd309384f8b8 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -299,7 +299,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
299 } else if (rule->action == FR_ACT_GOTO) 299 } else if (rule->action == FR_ACT_GOTO)
300 goto errout_free; 300 goto errout_free;
301 301
302 err = ops->configure(rule, skb, nlh, frh, tb); 302 err = ops->configure(rule, skb, frh, tb);
303 if (err < 0) 303 if (err < 0)
304 goto errout_free; 304 goto errout_free;
305 305
@@ -500,7 +500,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
500 if (rule->target) 500 if (rule->target)
501 NLA_PUT_U32(skb, FRA_GOTO, rule->target); 501 NLA_PUT_U32(skb, FRA_GOTO, rule->target);
502 502
503 if (ops->fill(rule, skb, nlh, frh) < 0) 503 if (ops->fill(rule, skb, frh) < 0)
504 goto nla_put_failure; 504 goto nla_put_failure;
505 505
506 return nlmsg_end(skb, nlh); 506 return nlmsg_end(skb, nlh);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6d62d4618cfc..78e5bfc454ae 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -128,12 +128,12 @@ static void est_timer(unsigned long arg)
128 npackets = e->bstats->packets; 128 npackets = e->bstats->packets;
129 brate = (nbytes - e->last_bytes)<<(7 - idx); 129 brate = (nbytes - e->last_bytes)<<(7 - idx);
130 e->last_bytes = nbytes; 130 e->last_bytes = nbytes;
131 e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log; 131 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
132 e->rate_est->bps = (e->avbps+0xF)>>5; 132 e->rate_est->bps = (e->avbps+0xF)>>5;
133 133
134 rate = (npackets - e->last_packets)<<(12 - idx); 134 rate = (npackets - e->last_packets)<<(12 - idx);
135 e->last_packets = npackets; 135 e->last_packets = npackets;
136 e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log; 136 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
137 e->rate_est->pps = (e->avpps+0x1FF)>>10; 137 e->rate_est->pps = (e->avpps+0x1FF)>>10;
138skip: 138skip:
139 read_unlock(&est_lock); 139 read_unlock(&est_lock);
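
The estimator hunk rewrites avbps += ((s64)(brate - avbps)) >> ewma_log as avbps += (brate >> ewma_log) - (avbps >> ewma_log). Both express the same exponentially weighted moving average, avg <- avg + (sample - avg)/2^ewma_log; shifting each term separately just avoids forming the signed difference first, and the two forms agree up to the rounding of the individual shifts. A quick comparison sketch:

    #include <stdio.h>

    /* EWMA update, avg <- avg + (sample - avg) / 2^ewma_log, written two ways */
    int main(void)
    {
        unsigned int ewma_log = 5;
        long long avbps1 = 1000000, avbps2 = 1000000;
        long long brate = 1600000;
        int i;

        for (i = 0; i < 10; i++) {
            /* old form: shift the difference */
            avbps1 += (brate - avbps1) >> ewma_log;
            /* new form: shift each term separately */
            avbps2 += (brate >> ewma_log) - (avbps2 >> ewma_log);
        }
        printf("old=%lld new=%lld (they differ only by rounding)\n",
               avbps1, avbps2);
        return 0;
    }
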
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 4c9c0121c9da..40a76ce19d9f 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -98,6 +98,31 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
98} 98}
99 99
100/* 100/*
101 * Copy kernel to iovec. Returns -EFAULT on error.
102 */
103
104int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
105 int offset, int len)
106{
107 int copy;
108 for (; len > 0; ++iov) {
109 /* Skip over the finished iovecs */
110 if (unlikely(offset >= iov->iov_len)) {
111 offset -= iov->iov_len;
112 continue;
113 }
114 copy = min_t(unsigned int, iov->iov_len - offset, len);
115 offset = 0;
116 if (copy_to_user(iov->iov_base, kdata, copy))
117 return -EFAULT;
118 kdata += copy;
119 len -= copy;
120 }
121
122 return 0;
123}
124
125/*
101 * Copy iovec to kernel. Returns -EFAULT on error. 126 * Copy iovec to kernel. Returns -EFAULT on error.
102 * 127 *
103 * Note: this modifies the original iovec. 128 * Note: this modifies the original iovec.
@@ -122,10 +147,11 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
122} 147}
123 148
124/* 149/*
125 * For use with ip_build_xmit 150 * Copy iovec from kernel. Returns -EFAULT on error.
126 */ 151 */
127int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, 152
128 int len) 153int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
154 int offset, int len)
129{ 155{
130 /* Skip over the finished iovecs */ 156 /* Skip over the finished iovecs */
131 while (offset >= iov->iov_len) { 157 while (offset >= iov->iov_len) {
@@ -236,3 +262,4 @@ EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
236EXPORT_SYMBOL(memcpy_fromiovec); 262EXPORT_SYMBOL(memcpy_fromiovec);
237EXPORT_SYMBOL(memcpy_fromiovecend); 263EXPORT_SYMBOL(memcpy_fromiovecend);
238EXPORT_SYMBOL(memcpy_toiovec); 264EXPORT_SYMBOL(memcpy_toiovec);
265EXPORT_SYMBOL(memcpy_toiovecend);
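
memcpy_toiovecend(), added above, copies a kernel buffer into an iovec array starting at a byte offset and, unlike memcpy_toiovec(), leaves the iovec itself unmodified. A userspace analogue of that skip-then-copy walk, with memcpy standing in for copy_to_user and no attempt to validate that the iovec is large enough:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* copy kdata into the iovec array, starting 'offset' bytes in;
     * the iovec entries themselves are left untouched */
    static int copy_to_iovec_end(const struct iovec *iov, const char *kdata,
                                 size_t offset, size_t len)
    {
        size_t copy;

        for (; len > 0; ++iov) {
            /* skip over iovec entries the offset already covers */
            if (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                continue;
            }
            copy = iov->iov_len - offset;
            if (copy > len)
                copy = len;
            memcpy((char *)iov->iov_base + offset, kdata, copy);
            offset = 0;
            kdata += copy;
            len -= copy;
        }
        return 0;
    }

    int main(void)
    {
        char a[4] = "....", b[8] = "........";
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

        copy_to_iovec_end(iov, "HELLO", 2, 5);
        printf("a=%.4s b=%.8s\n", a, b);   /* ..HE LLO..... */
        return 0;
    }
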
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 2da59a0ac4ac..3994680c08b9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -78,7 +78,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
78 goto err; 78 goto err;
79 79
80 if (!rtnl_trylock()) 80 if (!rtnl_trylock())
81 return -ERESTARTSYS; 81 return restart_syscall();
82 82
83 if (dev_isalive(net)) { 83 if (dev_isalive(net)) {
84 if ((ret = (*set)(net, new)) == 0) 84 if ((ret = (*set)(net, new)) == 0)
@@ -225,7 +225,8 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
225 if (len > 0 && buf[len - 1] == '\n') 225 if (len > 0 && buf[len - 1] == '\n')
226 --count; 226 --count;
227 227
228 rtnl_lock(); 228 if (!rtnl_trylock())
229 return restart_syscall();
229 ret = dev_set_alias(netdev, buf, count); 230 ret = dev_set_alias(netdev, buf, count);
230 rtnl_unlock(); 231 rtnl_unlock();
231 232
@@ -238,7 +239,8 @@ static ssize_t show_ifalias(struct device *dev,
238 const struct net_device *netdev = to_net_dev(dev); 239 const struct net_device *netdev = to_net_dev(dev);
239 ssize_t ret = 0; 240 ssize_t ret = 0;
240 241
241 rtnl_lock(); 242 if (!rtnl_trylock())
243 return restart_syscall();
242 if (netdev->ifalias) 244 if (netdev->ifalias)
243 ret = sprintf(buf, "%s\n", netdev->ifalias); 245 ret = sprintf(buf, "%s\n", netdev->ifalias);
244 rtnl_unlock(); 246 rtnl_unlock();
@@ -497,7 +499,6 @@ int netdev_register_kobject(struct net_device *net)
497 dev->platform_data = net; 499 dev->platform_data = net;
498 dev->groups = groups; 500 dev->groups = groups;
499 501
500 BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
501 dev_set_name(dev, "%s", net->name); 502 dev_set_name(dev, "%s", net->name);
502 503
503#ifdef CONFIG_SYSFS 504#ifdef CONFIG_SYSFS
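
The sysfs store paths above switch from an unconditional rtnl_lock() to rtnl_trylock(), returning restart_syscall() when the lock is contended so the write() is retried from userspace instead of sleeping inside the attribute handler. The usual motivation for this pattern is a potential lock-order inversion against a path that already holds the RTNL and is waiting for the sysfs file to go away. The pthread sketch below (build with -pthread) is only a loose analogy: the mutex names are invented for the example and the syscall restart is replaced by a drop-and-retry loop.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sysfs_active = PTHREAD_MUTEX_INITIALIZER;

    /* attribute store path: holds the sysfs "active" reference, then wants
     * the rtnl; trylock plus retry instead of blocking avoids the AB-BA hang */
    static void *store_attr(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&sysfs_active);
        while (pthread_mutex_trylock(&rtnl) != 0) {
            /* the kernel instead does "return restart_syscall();":
             * drop everything and let the syscall be retried */
            pthread_mutex_unlock(&sysfs_active);
            usleep(1000);
            pthread_mutex_lock(&sysfs_active);
        }
        puts("store ran under both locks");
        pthread_mutex_unlock(&rtnl);
        pthread_mutex_unlock(&sysfs_active);
        return NULL;
    }

    /* unregister path: holds the rtnl, then needs the sysfs side */
    static void *unregister_dev(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&rtnl);
        usleep(5000);                      /* widen the race window */
        pthread_mutex_lock(&sysfs_active);
        puts("unregister ran under both locks");
        pthread_mutex_unlock(&sysfs_active);
        pthread_mutex_unlock(&rtnl);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, store_attr, NULL);
        pthread_create(&b, NULL, unregister_dev, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }
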
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index c8fb45665e4f..b07b25bd2cde 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -20,6 +20,7 @@
20#include <linux/netlink.h> 20#include <linux/netlink.h>
21#include <linux/net_dropmon.h> 21#include <linux/net_dropmon.h>
22#include <trace/skb.h> 22#include <trace/skb.h>
23#include <trace/napi.h>
23 24
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25#include <asm/bitops.h> 26#include <asm/bitops.h>
@@ -27,3 +28,6 @@
27 28
28DEFINE_TRACE(kfree_skb); 29DEFINE_TRACE(kfree_skb);
29EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb); 30EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
31
32DEFINE_TRACE(napi_poll);
33EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e3bebd36f053..b7292a2719dc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -115,41 +115,34 @@ static void net_free(struct net *net)
115 kmem_cache_free(net_cachep, net); 115 kmem_cache_free(net_cachep, net);
116} 116}
117 117
118struct net *copy_net_ns(unsigned long flags, struct net *old_net) 118static struct net *net_create(void)
119{ 119{
120 struct net *new_net = NULL; 120 struct net *net;
121 int err; 121 int rv;
122
123 get_net(old_net);
124
125 if (!(flags & CLONE_NEWNET))
126 return old_net;
127
128 err = -ENOMEM;
129 new_net = net_alloc();
130 if (!new_net)
131 goto out_err;
132 122
123 net = net_alloc();
124 if (!net)
125 return ERR_PTR(-ENOMEM);
133 mutex_lock(&net_mutex); 126 mutex_lock(&net_mutex);
134 err = setup_net(new_net); 127 rv = setup_net(net);
135 if (!err) { 128 if (rv == 0) {
136 rtnl_lock(); 129 rtnl_lock();
137 list_add_tail(&new_net->list, &net_namespace_list); 130 list_add_tail(&net->list, &net_namespace_list);
138 rtnl_unlock(); 131 rtnl_unlock();
139 } 132 }
140 mutex_unlock(&net_mutex); 133 mutex_unlock(&net_mutex);
134 if (rv < 0) {
135 net_free(net);
136 return ERR_PTR(rv);
137 }
138 return net;
139}
141 140
142 if (err) 141struct net *copy_net_ns(unsigned long flags, struct net *old_net)
143 goto out_free; 142{
144out: 143 if (!(flags & CLONE_NEWNET))
145 put_net(old_net); 144 return get_net(old_net);
146 return new_net; 145 return net_create();
147
148out_free:
149 net_free(new_net);
150out_err:
151 new_net = ERR_PTR(err);
152 goto out;
153} 146}
154 147
155static void cleanup_net(struct work_struct *work) 148static void cleanup_net(struct work_struct *work)
@@ -203,9 +196,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
203static int __init net_ns_init(void) 196static int __init net_ns_init(void)
204{ 197{
205 struct net_generic *ng; 198 struct net_generic *ng;
206 int err;
207 199
208 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
209#ifdef CONFIG_NET_NS 200#ifdef CONFIG_NET_NS
210 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), 201 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
211 SMP_CACHE_BYTES, 202 SMP_CACHE_BYTES,
@@ -224,15 +215,14 @@ static int __init net_ns_init(void)
224 rcu_assign_pointer(init_net.gen, ng); 215 rcu_assign_pointer(init_net.gen, ng);
225 216
226 mutex_lock(&net_mutex); 217 mutex_lock(&net_mutex);
227 err = setup_net(&init_net); 218 if (setup_net(&init_net))
219 panic("Could not setup the initial network namespace");
228 220
229 rtnl_lock(); 221 rtnl_lock();
230 list_add_tail(&init_net.list, &net_namespace_list); 222 list_add_tail(&init_net.list, &net_namespace_list);
231 rtnl_unlock(); 223 rtnl_unlock();
232 224
233 mutex_unlock(&net_mutex); 225 mutex_unlock(&net_mutex);
234 if (err)
235 panic("Could not setup the initial network namespace");
236 226
237 return 0; 227 return 0;
238} 228}
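
The reworked copy_net_ns()/net_create() reports failure through ERR_PTR(-ENOMEM) or ERR_PTR(rv) rather than a NULL pointer plus a separate error variable, so a single return value carries either the new struct net or the error code. A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR encoding; the macros are reimplemented here purely for illustration (the real ones live in include/linux/err.h):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095
    #define ENOMEM_ERR 12

    /* errors are encoded as the last MAX_ERRNO values of the address space */
    static inline void *ERR_PTR(long err)     { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    struct net { int id; };

    static struct net *net_create(int fail)
    {
        struct net *net;

        if (fail)
            return ERR_PTR(-ENOMEM_ERR);
        net = calloc(1, sizeof(*net));
        if (!net)
            return ERR_PTR(-ENOMEM_ERR);
        return net;
    }

    int main(void)
    {
        struct net *net = net_create(1);

        if (IS_ERR(net))
            printf("net_create failed: %ld\n", PTR_ERR(net));
        else
            free(net);
        return 0;
    }
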
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 64f51eec6576..7ab31a7576a1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -24,6 +24,7 @@
24#include <net/tcp.h> 24#include <net/tcp.h>
25#include <net/udp.h> 25#include <net/udp.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27#include <trace/napi.h>
27 28
28/* 29/*
29 * We maintain a small pool of fully-sized skbs, to make sure the 30 * We maintain a small pool of fully-sized skbs, to make sure the
@@ -137,6 +138,7 @@ static int poll_one_napi(struct netpoll_info *npinfo,
137 set_bit(NAPI_STATE_NPSVC, &napi->state); 138 set_bit(NAPI_STATE_NPSVC, &napi->state);
138 139
139 work = napi->poll(napi, budget); 140 work = napi->poll(napi, budget);
141 trace_napi_poll(napi);
140 142
141 clear_bit(NAPI_STATE_NPSVC, &napi->state); 143 clear_bit(NAPI_STATE_NPSVC, &napi->state);
142 atomic_dec(&trapped); 144 atomic_dec(&trapped);
@@ -300,8 +302,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
300 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 302 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
301 tries > 0; --tries) { 303 tries > 0; --tries) {
302 if (__netif_tx_trylock(txq)) { 304 if (__netif_tx_trylock(txq)) {
303 if (!netif_tx_queue_stopped(txq)) 305 if (!netif_tx_queue_stopped(txq)) {
304 status = ops->ndo_start_xmit(skb, dev); 306 status = ops->ndo_start_xmit(skb, dev);
307 if (status == NETDEV_TX_OK)
308 txq_trans_update(txq);
309 }
305 __netif_tx_unlock(txq); 310 __netif_tx_unlock(txq);
306 311
307 if (status == NETDEV_TX_OK) 312 if (status == NETDEV_TX_OK)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0666a827bc62..b8ccd3c88d63 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3438,6 +3438,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3438 retry_now: 3438 retry_now:
3439 ret = (*xmit)(pkt_dev->skb, odev); 3439 ret = (*xmit)(pkt_dev->skb, odev);
3440 if (likely(ret == NETDEV_TX_OK)) { 3440 if (likely(ret == NETDEV_TX_OK)) {
3441 txq_trans_update(txq);
3441 pkt_dev->last_ok = 1; 3442 pkt_dev->last_ok = 1;
3442 pkt_dev->sofar++; 3443 pkt_dev->sofar++;
3443 pkt_dev->seq_num++; 3444 pkt_dev->seq_num++;
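
Both the netpoll and pktgen hunks call txq_trans_update(txq) after a successful ndo_start_xmit(), refreshing the queue's trans_start timestamp for packets transmitted outside the normal qdisc path so the tx watchdog does not compare against a stale "last transmission" time. A rough sketch of a timestamp-driven watchdog check; the field and function names are stand-ins, and the real watchdog also considers whether the queue is stopped:

    #include <stdio.h>

    #define HZ 100
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    struct txq { unsigned long trans_start; };

    /* what txq_trans_update() amounts to: stamp the queue on every
     * successful transmit */
    static void txq_trans_update(struct txq *q, unsigned long now)
    {
        q->trans_start = now;
    }

    /* watchdog: complain if the queue has been silent for too long */
    static void watchdog_check(const struct txq *q, unsigned long now)
    {
        if (time_after(now, q->trans_start + 5 * HZ))
            puts("tx timeout: queue looks stuck");
        else
            puts("queue healthy");
    }

    int main(void)
    {
        struct txq q = { .trans_start = 0 };
        unsigned long jiffies = 0;

        txq_trans_update(&q, jiffies);      /* packet sent at t=0 */
        jiffies += 6 * HZ;                  /* no transmits for 6 seconds */
        watchdog_check(&q, jiffies);

        txq_trans_update(&q, jiffies);      /* e.g. a netpoll/pktgen xmit */
        watchdog_check(&q, jiffies);
        return 0;
    }
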
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e505b5392e1e..8e815e685f28 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -526,8 +526,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
526 new->sp = secpath_get(old->sp); 526 new->sp = secpath_get(old->sp);
527#endif 527#endif
528 memcpy(new->cb, old->cb, sizeof(old->cb)); 528 memcpy(new->cb, old->cb, sizeof(old->cb));
529 new->csum_start = old->csum_start; 529 new->csum = old->csum;
530 new->csum_offset = old->csum_offset;
531 new->local_df = old->local_df; 530 new->local_df = old->local_df;
532 new->pkt_type = old->pkt_type; 531 new->pkt_type = old->pkt_type;
533 new->ip_summed = old->ip_summed; 532 new->ip_summed = old->ip_summed;
@@ -538,6 +537,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
538#endif 537#endif
539 new->protocol = old->protocol; 538 new->protocol = old->protocol;
540 new->mark = old->mark; 539 new->mark = old->mark;
540 new->iif = old->iif;
541 __nf_copy(new, old); 541 __nf_copy(new, old);
542#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 542#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
543 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 543 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -550,10 +550,18 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
550#endif 550#endif
551#endif 551#endif
552 new->vlan_tci = old->vlan_tci; 552 new->vlan_tci = old->vlan_tci;
553#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
554 new->do_not_encrypt = old->do_not_encrypt;
555 new->requeue = old->requeue;
556#endif
553 557
554 skb_copy_secmark(new, old); 558 skb_copy_secmark(new, old);
555} 559}
556 560
561/*
562 * You should not add any new code to this function. Add it to
563 * __copy_skb_header above instead.
564 */
557static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 565static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
558{ 566{
559#define C(x) n->x = skb->x 567#define C(x) n->x = skb->x
@@ -569,16 +577,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
569 n->cloned = 1; 577 n->cloned = 1;
570 n->nohdr = 0; 578 n->nohdr = 0;
571 n->destructor = NULL; 579 n->destructor = NULL;
572 C(iif);
573 C(tail); 580 C(tail);
574 C(end); 581 C(end);
575 C(head); 582 C(head);
576 C(data); 583 C(data);
577 C(truesize); 584 C(truesize);
578#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
579 C(do_not_encrypt);
580 C(requeue);
581#endif
582 atomic_set(&n->users, 1); 585 atomic_set(&n->users, 1);
583 586
584 atomic_inc(&(skb_shinfo(skb)->dataref)); 587 atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -2661,30 +2664,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2661{ 2664{
2662 struct sk_buff *p = *head; 2665 struct sk_buff *p = *head;
2663 struct sk_buff *nskb; 2666 struct sk_buff *nskb;
2667 struct skb_shared_info *skbinfo = skb_shinfo(skb);
2668 struct skb_shared_info *pinfo = skb_shinfo(p);
2664 unsigned int headroom; 2669 unsigned int headroom;
2665 unsigned int len = skb_gro_len(skb); 2670 unsigned int len = skb_gro_len(skb);
2671 unsigned int offset = skb_gro_offset(skb);
2672 unsigned int headlen = skb_headlen(skb);
2666 2673
2667 if (p->len + len >= 65536) 2674 if (p->len + len >= 65536)
2668 return -E2BIG; 2675 return -E2BIG;
2669 2676
2670 if (skb_shinfo(p)->frag_list) 2677 if (pinfo->frag_list)
2671 goto merge; 2678 goto merge;
2672 else if (skb_headlen(skb) <= skb_gro_offset(skb)) { 2679 else if (headlen <= offset) {
2673 if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags > 2680 skb_frag_t *frag;
2674 MAX_SKB_FRAGS) 2681 skb_frag_t *frag2;
2682 int i = skbinfo->nr_frags;
2683 int nr_frags = pinfo->nr_frags + i;
2684
2685 offset -= headlen;
2686
2687 if (nr_frags > MAX_SKB_FRAGS)
2675 return -E2BIG; 2688 return -E2BIG;
2676 2689
2677 skb_shinfo(skb)->frags[0].page_offset += 2690 pinfo->nr_frags = nr_frags;
2678 skb_gro_offset(skb) - skb_headlen(skb); 2691 skbinfo->nr_frags = 0;
2679 skb_shinfo(skb)->frags[0].size -=
2680 skb_gro_offset(skb) - skb_headlen(skb);
2681 2692
2682 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, 2693 frag = pinfo->frags + nr_frags;
2683 skb_shinfo(skb)->frags, 2694 frag2 = skbinfo->frags + i;
2684 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 2695 do {
2696 *--frag = *--frag2;
2697 } while (--i);
2685 2698
2686 skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; 2699 frag->page_offset += offset;
2687 skb_shinfo(skb)->nr_frags = 0; 2700 frag->size -= offset;
2688 2701
2689 skb->truesize -= skb->data_len; 2702 skb->truesize -= skb->data_len;
2690 skb->len -= skb->data_len; 2703 skb->len -= skb->data_len;
@@ -2715,7 +2728,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2715 2728
2716 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2729 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2717 skb_shinfo(nskb)->frag_list = p; 2730 skb_shinfo(nskb)->frag_list = p;
2718 skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; 2731 skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2719 skb_header_release(p); 2732 skb_header_release(p);
2720 nskb->prev = p; 2733 nskb->prev = p;
2721 2734
@@ -2730,16 +2743,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2730 p = nskb; 2743 p = nskb;
2731 2744
2732merge: 2745merge:
2733 if (skb_gro_offset(skb) > skb_headlen(skb)) { 2746 if (offset > headlen) {
2734 skb_shinfo(skb)->frags[0].page_offset += 2747 skbinfo->frags[0].page_offset += offset - headlen;
2735 skb_gro_offset(skb) - skb_headlen(skb); 2748 skbinfo->frags[0].size -= offset - headlen;
2736 skb_shinfo(skb)->frags[0].size -= 2749 offset = headlen;
2737 skb_gro_offset(skb) - skb_headlen(skb);
2738 skb_gro_reset_offset(skb);
2739 skb_gro_pull(skb, skb_headlen(skb));
2740 } 2750 }
2741 2751
2742 __skb_pull(skb, skb_gro_offset(skb)); 2752 __skb_pull(skb, offset);
2743 2753
2744 p->prev->next = skb; 2754 p->prev->next = skb;
2745 p->prev = skb; 2755 p->prev = skb;
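
The skb_gro_receive() rework caches the skb_shinfo() pointers and, when the new segment's payload lives entirely in page fragments (headlen <= offset), appends those fragments to the head skb's frag array and trims the first appended fragment by the header bytes already consumed beyond the linear area (offset - headlen). A simplified sketch of that append-and-trim step on plain arrays; the struct and limit names are invented, and the len/truesize bookkeeping is left out:

    #include <stdio.h>

    #define MAX_FRAGS 4

    struct frag { unsigned int page_offset, size; };

    struct pkt {
        struct frag frags[MAX_FRAGS];
        int nr_frags;
    };

    /* append skb's fragments to p, skipping 'offset' bytes of headers that
     * were already consumed beyond skb's linear area of 'headlen' bytes */
    static int merge_frags(struct pkt *p, struct pkt *skb,
                           unsigned int offset, unsigned int headlen)
    {
        int i, first = p->nr_frags;
        int nr = p->nr_frags + skb->nr_frags;

        if (nr > MAX_FRAGS || offset < headlen)
            return -1;
        offset -= headlen;          /* header bytes taken out of frag[0] */

        for (i = 0; i < skb->nr_frags; i++)
            p->frags[first + i] = skb->frags[i];
        p->nr_frags = nr;
        skb->nr_frags = 0;

        /* the first appended fragment starts past the consumed headers */
        p->frags[first].page_offset += offset;
        p->frags[first].size -= offset;
        return 0;
    }

    int main(void)
    {
        struct pkt p   = { { { 0, 1448 } }, 1 };
        struct pkt skb = { { { 0, 1500 } }, 1 };

        /* 52 bytes of linear header data, 66 header bytes consumed in all,
         * so 14 bytes of skb's first fragment are headers to skip */
        if (merge_frags(&p, &skb, 66, 52) == 0)
            printf("head has %d frags; appended frag starts at %u, size %u\n",
                   p.nr_frags, p.frags[1].page_offset, p.frags[1].size);
        return 0;
    }
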
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dbf3ffb35cc..58dec9dff99a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -212,6 +212,7 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
212 212
213/* Maximal space eaten by iovec or ancilliary data plus some space */ 213/* Maximal space eaten by iovec or ancilliary data plus some space */
214int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 214int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
215EXPORT_SYMBOL(sysctl_optmem_max);
215 216
216static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 217static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
217{ 218{
@@ -444,7 +445,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
444int sock_setsockopt(struct socket *sock, int level, int optname, 445int sock_setsockopt(struct socket *sock, int level, int optname,
445 char __user *optval, int optlen) 446 char __user *optval, int optlen)
446{ 447{
447 struct sock *sk=sock->sk; 448 struct sock *sk = sock->sk;
448 int val; 449 int val;
449 int valbool; 450 int valbool;
450 struct linger ling; 451 struct linger ling;
@@ -463,15 +464,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
463 if (get_user(val, (int __user *)optval)) 464 if (get_user(val, (int __user *)optval))
464 return -EFAULT; 465 return -EFAULT;
465 466
466 valbool = val?1:0; 467 valbool = val ? 1 : 0;
467 468
468 lock_sock(sk); 469 lock_sock(sk);
469 470
470 switch(optname) { 471 switch (optname) {
471 case SO_DEBUG: 472 case SO_DEBUG:
472 if (val && !capable(CAP_NET_ADMIN)) { 473 if (val && !capable(CAP_NET_ADMIN))
473 ret = -EACCES; 474 ret = -EACCES;
474 } else 475 else
475 sock_valbool_flag(sk, SOCK_DBG, valbool); 476 sock_valbool_flag(sk, SOCK_DBG, valbool);
476 break; 477 break;
477 case SO_REUSEADDR: 478 case SO_REUSEADDR:
@@ -582,7 +583,7 @@ set_rcvbuf:
582 ret = -EINVAL; /* 1003.1g */ 583 ret = -EINVAL; /* 1003.1g */
583 break; 584 break;
584 } 585 }
585 if (copy_from_user(&ling,optval,sizeof(ling))) { 586 if (copy_from_user(&ling, optval, sizeof(ling))) {
586 ret = -EFAULT; 587 ret = -EFAULT;
587 break; 588 break;
588 } 589 }
@@ -690,9 +691,8 @@ set_rcvbuf:
690 case SO_MARK: 691 case SO_MARK:
691 if (!capable(CAP_NET_ADMIN)) 692 if (!capable(CAP_NET_ADMIN))
692 ret = -EPERM; 693 ret = -EPERM;
693 else { 694 else
694 sk->sk_mark = val; 695 sk->sk_mark = val;
695 }
696 break; 696 break;
697 697
698 /* We implement the SO_SNDLOWAT etc to 698 /* We implement the SO_SNDLOWAT etc to
@@ -704,6 +704,7 @@ set_rcvbuf:
704 release_sock(sk); 704 release_sock(sk);
705 return ret; 705 return ret;
706} 706}
707EXPORT_SYMBOL(sock_setsockopt);
707 708
708 709
709int sock_getsockopt(struct socket *sock, int level, int optname, 710int sock_getsockopt(struct socket *sock, int level, int optname,
@@ -727,7 +728,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
727 728
728 memset(&v, 0, sizeof(v)); 729 memset(&v, 0, sizeof(v));
729 730
730 switch(optname) { 731 switch (optname) {
731 case SO_DEBUG: 732 case SO_DEBUG:
732 v.val = sock_flag(sk, SOCK_DBG); 733 v.val = sock_flag(sk, SOCK_DBG);
733 break; 734 break;
@@ -762,7 +763,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
762 763
763 case SO_ERROR: 764 case SO_ERROR:
764 v.val = -sock_error(sk); 765 v.val = -sock_error(sk);
765 if (v.val==0) 766 if (v.val == 0)
766 v.val = xchg(&sk->sk_err_soft, 0); 767 v.val = xchg(&sk->sk_err_soft, 0);
767 break; 768 break;
768 769
@@ -816,7 +817,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
816 break; 817 break;
817 818
818 case SO_RCVTIMEO: 819 case SO_RCVTIMEO:
819 lv=sizeof(struct timeval); 820 lv = sizeof(struct timeval);
820 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { 821 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
821 v.tm.tv_sec = 0; 822 v.tm.tv_sec = 0;
822 v.tm.tv_usec = 0; 823 v.tm.tv_usec = 0;
@@ -827,7 +828,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
827 break; 828 break;
828 829
829 case SO_SNDTIMEO: 830 case SO_SNDTIMEO:
830 lv=sizeof(struct timeval); 831 lv = sizeof(struct timeval);
831 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { 832 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
832 v.tm.tv_sec = 0; 833 v.tm.tv_sec = 0;
833 v.tm.tv_usec = 0; 834 v.tm.tv_usec = 0;
@@ -842,7 +843,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
842 break; 843 break;
843 844
844 case SO_SNDLOWAT: 845 case SO_SNDLOWAT:
845 v.val=1; 846 v.val = 1;
846 break; 847 break;
847 848
848 case SO_PASSCRED: 849 case SO_PASSCRED:
@@ -1002,6 +1003,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1002 1003
1003 return sk; 1004 return sk;
1004} 1005}
1006EXPORT_SYMBOL(sk_alloc);
1005 1007
1006void sk_free(struct sock *sk) 1008void sk_free(struct sock *sk)
1007{ 1009{
@@ -1026,6 +1028,7 @@ void sk_free(struct sock *sk)
1026 put_net(sock_net(sk)); 1028 put_net(sock_net(sk));
1027 sk_prot_free(sk->sk_prot_creator, sk); 1029 sk_prot_free(sk->sk_prot_creator, sk);
1028} 1030}
1031EXPORT_SYMBOL(sk_free);
1029 1032
1030/* 1033/*
1031 * Last sock_put should drop referrence to sk->sk_net. It has already 1034 * Last sock_put should drop referrence to sk->sk_net. It has already
@@ -1126,7 +1129,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1126out: 1129out:
1127 return newsk; 1130 return newsk;
1128} 1131}
1129
1130EXPORT_SYMBOL_GPL(sk_clone); 1132EXPORT_SYMBOL_GPL(sk_clone);
1131 1133
1132void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 1134void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
@@ -1177,6 +1179,7 @@ void sock_wfree(struct sk_buff *skb)
1177 sk->sk_write_space(sk); 1179 sk->sk_write_space(sk);
1178 sock_put(sk); 1180 sock_put(sk);
1179} 1181}
1182EXPORT_SYMBOL(sock_wfree);
1180 1183
1181/* 1184/*
1182 * Read buffer destructor automatically called from kfree_skb. 1185 * Read buffer destructor automatically called from kfree_skb.
@@ -1188,6 +1191,7 @@ void sock_rfree(struct sk_buff *skb)
1188 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1191 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1189 sk_mem_uncharge(skb->sk, skb->truesize); 1192 sk_mem_uncharge(skb->sk, skb->truesize);
1190} 1193}
1194EXPORT_SYMBOL(sock_rfree);
1191 1195
1192 1196
1193int sock_i_uid(struct sock *sk) 1197int sock_i_uid(struct sock *sk)
@@ -1199,6 +1203,7 @@ int sock_i_uid(struct sock *sk)
1199 read_unlock(&sk->sk_callback_lock); 1203 read_unlock(&sk->sk_callback_lock);
1200 return uid; 1204 return uid;
1201} 1205}
1206EXPORT_SYMBOL(sock_i_uid);
1202 1207
1203unsigned long sock_i_ino(struct sock *sk) 1208unsigned long sock_i_ino(struct sock *sk)
1204{ 1209{
@@ -1209,6 +1214,7 @@ unsigned long sock_i_ino(struct sock *sk)
1209 read_unlock(&sk->sk_callback_lock); 1214 read_unlock(&sk->sk_callback_lock);
1210 return ino; 1215 return ino;
1211} 1216}
1217EXPORT_SYMBOL(sock_i_ino);
1212 1218
1213/* 1219/*
1214 * Allocate a skb from the socket's send buffer. 1220 * Allocate a skb from the socket's send buffer.
@@ -1217,7 +1223,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1217 gfp_t priority) 1223 gfp_t priority)
1218{ 1224{
1219 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1225 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1220 struct sk_buff * skb = alloc_skb(size, priority); 1226 struct sk_buff *skb = alloc_skb(size, priority);
1221 if (skb) { 1227 if (skb) {
1222 skb_set_owner_w(skb, sk); 1228 skb_set_owner_w(skb, sk);
1223 return skb; 1229 return skb;
@@ -1225,6 +1231,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1225 } 1231 }
1226 return NULL; 1232 return NULL;
1227} 1233}
1234EXPORT_SYMBOL(sock_wmalloc);
1228 1235
1229/* 1236/*
1230 * Allocate a skb from the socket's receive buffer. 1237 * Allocate a skb from the socket's receive buffer.
@@ -1261,6 +1268,7 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1261 } 1268 }
1262 return NULL; 1269 return NULL;
1263} 1270}
1271EXPORT_SYMBOL(sock_kmalloc);
1264 1272
1265/* 1273/*
1266 * Free an option memory block. 1274 * Free an option memory block.
@@ -1270,11 +1278,12 @@ void sock_kfree_s(struct sock *sk, void *mem, int size)
1270 kfree(mem); 1278 kfree(mem);
1271 atomic_sub(size, &sk->sk_omem_alloc); 1279 atomic_sub(size, &sk->sk_omem_alloc);
1272} 1280}
1281EXPORT_SYMBOL(sock_kfree_s);
1273 1282
1274/* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 1283/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1275 I think, these locks should be removed for datagram sockets. 1284 I think, these locks should be removed for datagram sockets.
1276 */ 1285 */
1277static long sock_wait_for_wmem(struct sock * sk, long timeo) 1286static long sock_wait_for_wmem(struct sock *sk, long timeo)
1278{ 1287{
1279 DEFINE_WAIT(wait); 1288 DEFINE_WAIT(wait);
1280 1289
@@ -1392,6 +1401,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1392{ 1401{
1393 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); 1402 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1394} 1403}
1404EXPORT_SYMBOL(sock_alloc_send_skb);
1395 1405
1396static void __lock_sock(struct sock *sk) 1406static void __lock_sock(struct sock *sk)
1397{ 1407{
@@ -1460,7 +1470,6 @@ int sk_wait_data(struct sock *sk, long *timeo)
1460 finish_wait(sk->sk_sleep, &wait); 1470 finish_wait(sk->sk_sleep, &wait);
1461 return rc; 1471 return rc;
1462} 1472}
1463
1464EXPORT_SYMBOL(sk_wait_data); 1473EXPORT_SYMBOL(sk_wait_data);
1465 1474
1466/** 1475/**
@@ -1541,7 +1550,6 @@ suppress_allocation:
1541 atomic_sub(amt, prot->memory_allocated); 1550 atomic_sub(amt, prot->memory_allocated);
1542 return 0; 1551 return 0;
1543} 1552}
1544
1545EXPORT_SYMBOL(__sk_mem_schedule); 1553EXPORT_SYMBOL(__sk_mem_schedule);
1546 1554
1547/** 1555/**
@@ -1560,7 +1568,6 @@ void __sk_mem_reclaim(struct sock *sk)
1560 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1568 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1561 *prot->memory_pressure = 0; 1569 *prot->memory_pressure = 0;
1562} 1570}
1563
1564EXPORT_SYMBOL(__sk_mem_reclaim); 1571EXPORT_SYMBOL(__sk_mem_reclaim);
1565 1572
1566 1573
@@ -1575,78 +1582,92 @@ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1575{ 1582{
1576 return -EOPNOTSUPP; 1583 return -EOPNOTSUPP;
1577} 1584}
1585EXPORT_SYMBOL(sock_no_bind);
1578 1586
1579int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 1587int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1580 int len, int flags) 1588 int len, int flags)
1581{ 1589{
1582 return -EOPNOTSUPP; 1590 return -EOPNOTSUPP;
1583} 1591}
1592EXPORT_SYMBOL(sock_no_connect);
1584 1593
1585int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 1594int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1586{ 1595{
1587 return -EOPNOTSUPP; 1596 return -EOPNOTSUPP;
1588} 1597}
1598EXPORT_SYMBOL(sock_no_socketpair);
1589 1599
1590int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 1600int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1591{ 1601{
1592 return -EOPNOTSUPP; 1602 return -EOPNOTSUPP;
1593} 1603}
1604EXPORT_SYMBOL(sock_no_accept);
1594 1605
1595int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 1606int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1596 int *len, int peer) 1607 int *len, int peer)
1597{ 1608{
1598 return -EOPNOTSUPP; 1609 return -EOPNOTSUPP;
1599} 1610}
1611EXPORT_SYMBOL(sock_no_getname);
1600 1612
1601unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt) 1613unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1602{ 1614{
1603 return 0; 1615 return 0;
1604} 1616}
1617EXPORT_SYMBOL(sock_no_poll);
1605 1618
1606int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1619int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1607{ 1620{
1608 return -EOPNOTSUPP; 1621 return -EOPNOTSUPP;
1609} 1622}
1623EXPORT_SYMBOL(sock_no_ioctl);
1610 1624
1611int sock_no_listen(struct socket *sock, int backlog) 1625int sock_no_listen(struct socket *sock, int backlog)
1612{ 1626{
1613 return -EOPNOTSUPP; 1627 return -EOPNOTSUPP;
1614} 1628}
1629EXPORT_SYMBOL(sock_no_listen);
1615 1630
1616int sock_no_shutdown(struct socket *sock, int how) 1631int sock_no_shutdown(struct socket *sock, int how)
1617{ 1632{
1618 return -EOPNOTSUPP; 1633 return -EOPNOTSUPP;
1619} 1634}
1635EXPORT_SYMBOL(sock_no_shutdown);
1620 1636
1621int sock_no_setsockopt(struct socket *sock, int level, int optname, 1637int sock_no_setsockopt(struct socket *sock, int level, int optname,
1622 char __user *optval, int optlen) 1638 char __user *optval, int optlen)
1623{ 1639{
1624 return -EOPNOTSUPP; 1640 return -EOPNOTSUPP;
1625} 1641}
1642EXPORT_SYMBOL(sock_no_setsockopt);
1626 1643
1627int sock_no_getsockopt(struct socket *sock, int level, int optname, 1644int sock_no_getsockopt(struct socket *sock, int level, int optname,
1628 char __user *optval, int __user *optlen) 1645 char __user *optval, int __user *optlen)
1629{ 1646{
1630 return -EOPNOTSUPP; 1647 return -EOPNOTSUPP;
1631} 1648}
1649EXPORT_SYMBOL(sock_no_getsockopt);
1632 1650
1633int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1651int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1634 size_t len) 1652 size_t len)
1635{ 1653{
1636 return -EOPNOTSUPP; 1654 return -EOPNOTSUPP;
1637} 1655}
1656EXPORT_SYMBOL(sock_no_sendmsg);
1638 1657
1639int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 1658int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1640 size_t len, int flags) 1659 size_t len, int flags)
1641{ 1660{
1642 return -EOPNOTSUPP; 1661 return -EOPNOTSUPP;
1643} 1662}
1663EXPORT_SYMBOL(sock_no_recvmsg);
1644 1664
1645int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 1665int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1646{ 1666{
1647 /* Mirror missing mmap method error code */ 1667 /* Mirror missing mmap method error code */
1648 return -ENODEV; 1668 return -ENODEV;
1649} 1669}
1670EXPORT_SYMBOL(sock_no_mmap);
1650 1671
1651ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) 1672ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1652{ 1673{
@@ -1660,6 +1681,7 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz
1660 kunmap(page); 1681 kunmap(page);
1661 return res; 1682 return res;
1662} 1683}
1684EXPORT_SYMBOL(sock_no_sendpage);
1663 1685
1664/* 1686/*
1665 * Default Socket Callbacks 1687 * Default Socket Callbacks
@@ -1723,6 +1745,7 @@ void sk_send_sigurg(struct sock *sk)
1723 if (send_sigurg(&sk->sk_socket->file->f_owner)) 1745 if (send_sigurg(&sk->sk_socket->file->f_owner))
1724 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 1746 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1725} 1747}
1748EXPORT_SYMBOL(sk_send_sigurg);
1726 1749
1727void sk_reset_timer(struct sock *sk, struct timer_list* timer, 1750void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1728 unsigned long expires) 1751 unsigned long expires)
@@ -1730,7 +1753,6 @@ void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1730 if (!mod_timer(timer, expires)) 1753 if (!mod_timer(timer, expires))
1731 sock_hold(sk); 1754 sock_hold(sk);
1732} 1755}
1733
1734EXPORT_SYMBOL(sk_reset_timer); 1756EXPORT_SYMBOL(sk_reset_timer);
1735 1757
1736void sk_stop_timer(struct sock *sk, struct timer_list* timer) 1758void sk_stop_timer(struct sock *sk, struct timer_list* timer)
@@ -1738,7 +1760,6 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1738 if (timer_pending(timer) && del_timer(timer)) 1760 if (timer_pending(timer) && del_timer(timer))
1739 __sock_put(sk); 1761 __sock_put(sk);
1740} 1762}
1741
1742EXPORT_SYMBOL(sk_stop_timer); 1763EXPORT_SYMBOL(sk_stop_timer);
1743 1764
1744void sock_init_data(struct socket *sock, struct sock *sk) 1765void sock_init_data(struct socket *sock, struct sock *sk)
@@ -1797,6 +1818,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1797 atomic_set(&sk->sk_refcnt, 1); 1818 atomic_set(&sk->sk_refcnt, 1);
1798 atomic_set(&sk->sk_drops, 0); 1819 atomic_set(&sk->sk_drops, 0);
1799} 1820}
1821EXPORT_SYMBOL(sock_init_data);
1800 1822
1801void lock_sock_nested(struct sock *sk, int subclass) 1823void lock_sock_nested(struct sock *sk, int subclass)
1802{ 1824{
@@ -1812,7 +1834,6 @@ void lock_sock_nested(struct sock *sk, int subclass)
1812 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 1834 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
1813 local_bh_enable(); 1835 local_bh_enable();
1814} 1836}
1815
1816EXPORT_SYMBOL(lock_sock_nested); 1837EXPORT_SYMBOL(lock_sock_nested);
1817 1838
1818void release_sock(struct sock *sk) 1839void release_sock(struct sock *sk)
@@ -1895,7 +1916,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
1895 1916
1896 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); 1917 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1897} 1918}
1898
1899EXPORT_SYMBOL(sock_common_getsockopt); 1919EXPORT_SYMBOL(sock_common_getsockopt);
1900 1920
1901#ifdef CONFIG_COMPAT 1921#ifdef CONFIG_COMPAT
@@ -1925,7 +1945,6 @@ int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1925 msg->msg_namelen = addr_len; 1945 msg->msg_namelen = addr_len;
1926 return err; 1946 return err;
1927} 1947}
1928
1929EXPORT_SYMBOL(sock_common_recvmsg); 1948EXPORT_SYMBOL(sock_common_recvmsg);
1930 1949
1931/* 1950/*
@@ -1938,7 +1957,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
1938 1957
1939 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); 1958 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1940} 1959}
1941
1942EXPORT_SYMBOL(sock_common_setsockopt); 1960EXPORT_SYMBOL(sock_common_setsockopt);
1943 1961
1944#ifdef CONFIG_COMPAT 1962#ifdef CONFIG_COMPAT
@@ -1989,7 +2007,6 @@ void sk_common_release(struct sock *sk)
1989 sk_refcnt_debug_release(sk); 2007 sk_refcnt_debug_release(sk);
1990 sock_put(sk); 2008 sock_put(sk);
1991} 2009}
1992
1993EXPORT_SYMBOL(sk_common_release); 2010EXPORT_SYMBOL(sk_common_release);
1994 2011
1995static DEFINE_RWLOCK(proto_list_lock); 2012static DEFINE_RWLOCK(proto_list_lock);
@@ -2171,7 +2188,6 @@ out_free_sock_slab:
2171out: 2188out:
2172 return -ENOBUFS; 2189 return -ENOBUFS;
2173} 2190}
2174
2175EXPORT_SYMBOL(proto_register); 2191EXPORT_SYMBOL(proto_register);
2176 2192
2177void proto_unregister(struct proto *prot) 2193void proto_unregister(struct proto *prot)
@@ -2198,7 +2214,6 @@ void proto_unregister(struct proto *prot)
2198 prot->twsk_prot->twsk_slab = NULL; 2214 prot->twsk_prot->twsk_slab = NULL;
2199 } 2215 }
2200} 2216}
2201
2202EXPORT_SYMBOL(proto_unregister); 2217EXPORT_SYMBOL(proto_unregister);
2203 2218
2204#ifdef CONFIG_PROC_FS 2219#ifdef CONFIG_PROC_FS
@@ -2324,33 +2339,3 @@ static int __init proto_init(void)
2324subsys_initcall(proto_init); 2339subsys_initcall(proto_init);
2325 2340
2326#endif /* PROC_FS */ 2341#endif /* PROC_FS */
2327
2328EXPORT_SYMBOL(sk_alloc);
2329EXPORT_SYMBOL(sk_free);
2330EXPORT_SYMBOL(sk_send_sigurg);
2331EXPORT_SYMBOL(sock_alloc_send_skb);
2332EXPORT_SYMBOL(sock_init_data);
2333EXPORT_SYMBOL(sock_kfree_s);
2334EXPORT_SYMBOL(sock_kmalloc);
2335EXPORT_SYMBOL(sock_no_accept);
2336EXPORT_SYMBOL(sock_no_bind);
2337EXPORT_SYMBOL(sock_no_connect);
2338EXPORT_SYMBOL(sock_no_getname);
2339EXPORT_SYMBOL(sock_no_getsockopt);
2340EXPORT_SYMBOL(sock_no_ioctl);
2341EXPORT_SYMBOL(sock_no_listen);
2342EXPORT_SYMBOL(sock_no_mmap);
2343EXPORT_SYMBOL(sock_no_poll);
2344EXPORT_SYMBOL(sock_no_recvmsg);
2345EXPORT_SYMBOL(sock_no_sendmsg);
2346EXPORT_SYMBOL(sock_no_sendpage);
2347EXPORT_SYMBOL(sock_no_setsockopt);
2348EXPORT_SYMBOL(sock_no_shutdown);
2349EXPORT_SYMBOL(sock_no_socketpair);
2350EXPORT_SYMBOL(sock_rfree);
2351EXPORT_SYMBOL(sock_setsockopt);
2352EXPORT_SYMBOL(sock_wfree);
2353EXPORT_SYMBOL(sock_wmalloc);
2354EXPORT_SYMBOL(sock_i_uid);
2355EXPORT_SYMBOL(sock_i_ino);
2356EXPORT_SYMBOL(sysctl_optmem_max);
diff --git a/net/core/stream.c b/net/core/stream.c
index 8727cead64ad..a37debfeb1b2 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -33,7 +33,8 @@ void sk_stream_write_space(struct sock *sk)
33 clear_bit(SOCK_NOSPACE, &sock->flags); 33 clear_bit(SOCK_NOSPACE, &sock->flags);
34 34
35 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 35 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
36 wake_up_interruptible(sk->sk_sleep); 36 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
37 POLLWRNORM | POLLWRBAND);
37 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 38 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
38 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); 39 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
39 } 40 }
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9647d911f916..bccb3887773e 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1250,14 +1250,8 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1250 if (skb) { 1250 if (skb) {
1251 amount = skb->len; 1251 amount = skb->len;
1252 } else { 1252 } else {
1253 skb = sk->sk_receive_queue.next; 1253 skb_queue_walk(&sk->sk_receive_queue, skb)
1254 for (;;) {
1255 if (skb ==
1256 (struct sk_buff *)&sk->sk_receive_queue)
1257 break;
1258 amount += skb->len; 1254 amount += skb->len;
1259 skb = skb->next;
1260 }
1261 } 1255 }
1262 release_sock(sk); 1256 release_sock(sk);
1263 err = put_user(amount, (int __user *)arg); 1257 err = put_user(amount, (int __user *)arg);
@@ -1644,13 +1638,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1644 1638
1645static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) 1639static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1646{ 1640{
1647 struct sk_buff *skb = q->next; 1641 struct sk_buff *skb;
1648 int len = 0; 1642 int len = 0;
1649 1643
1650 if (flags & MSG_OOB) 1644 if (flags & MSG_OOB)
1651 return !skb_queue_empty(q) ? 1 : 0; 1645 return !skb_queue_empty(q) ? 1 : 0;
1652 1646
1653 while(skb != (struct sk_buff *)q) { 1647 skb_queue_walk(q, skb) {
1654 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1648 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1655 len += skb->len; 1649 len += skb->len;
1656 1650
@@ -1666,8 +1660,6 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
1666 /* minimum data length for read exceeded */ 1660 /* minimum data length for read exceeded */
1667 if (len >= target) 1661 if (len >= target)
1668 return 1; 1662 return 1;
1669
1670 skb = skb->next;
1671 } 1663 }
1672 1664
1673 return 0; 1665 return 0;
@@ -1683,7 +1675,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1683 size_t target = size > 1 ? 1 : 0; 1675 size_t target = size > 1 ? 1 : 0;
1684 size_t copied = 0; 1676 size_t copied = 0;
1685 int rv = 0; 1677 int rv = 0;
1686 struct sk_buff *skb, *nskb; 1678 struct sk_buff *skb, *n;
1687 struct dn_skb_cb *cb = NULL; 1679 struct dn_skb_cb *cb = NULL;
1688 unsigned char eor = 0; 1680 unsigned char eor = 0;
1689 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1681 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1758,7 +1750,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1758 finish_wait(sk->sk_sleep, &wait); 1750 finish_wait(sk->sk_sleep, &wait);
1759 } 1751 }
1760 1752
1761 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { 1753 skb_queue_walk_safe(queue, skb, n) {
1762 unsigned int chunk = skb->len; 1754 unsigned int chunk = skb->len;
1763 cb = DN_SKB_CB(skb); 1755 cb = DN_SKB_CB(skb);
1764 1756
@@ -1775,7 +1767,6 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1775 skb_pull(skb, chunk); 1767 skb_pull(skb, chunk);
1776 1768
1777 eor = cb->nsp_flags & 0x40; 1769 eor = cb->nsp_flags & 0x40;
1778 nskb = skb->next;
1779 1770
1780 if (skb->len == 0) { 1771 if (skb->len == 0) {
1781 skb_unlink(skb, queue); 1772 skb_unlink(skb, queue);
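
Note: the af_decnet.c hunks above (and the dn_nsp_out.c and af_econet.c hunks below) replace open-coded sk_buff list traversal, a cursor advanced via skb->next that stops once it wraps back to the queue head cast to a struct sk_buff pointer, with the skb_queue_walk() and skb_queue_walk_safe() helpers. The following is a minimal userspace model of that sentinel-headed circular list, with made-up names (queue_walk, unlink_node), not the kernel macros themselves; it shows why the open-coded loop compares against the head and why the _safe variant caches the next pointer before the body may unlink the current node.

#include <assert.h>
#include <stdio.h>

/* Userspace model of the sentinel-headed circular list behind
 * struct sk_buff_head: the head is a node carrying no data, and a
 * walk terminates when the cursor wraps back to the head. */
struct node { struct node *next, *prev; int len; };

#define queue_walk(head, pos) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

#define queue_walk_safe(head, pos, tmp) \
        for ((pos) = (head)->next, (tmp) = (pos)->next; \
             (pos) != (head); (pos) = (tmp), (tmp) = (pos)->next)

static void queue_init(struct node *head) { head->next = head->prev = head; }

static void queue_tail(struct node *head, struct node *n)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void unlink_node(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

int main(void)
{
        struct node head, n[3], *pos, *tmp;
        int total = 0, i;

        queue_init(&head);
        for (i = 0; i < 3; i++) {
                n[i].len = i + 1;
                queue_tail(&head, &n[i]);
        }

        queue_walk(&head, pos)                  /* read-only walk */
                total += pos->len;
        assert(total == 6);

        queue_walk_safe(&head, pos, tmp)        /* body may unlink pos */
                if (pos->len == 2)
                        unlink_node(pos);

        total = 0;
        queue_walk(&head, pos)
                total += pos->len;
        printf("sum after unlink: %d\n", total);        /* prints 4 */
        return 0;
}

The helpers hide the sentinel comparison, so later changes to the list layout (such as the skb_queue_next()/skb_queue_prev() accessors used in the tcp_input.c hunks further down) only touch one place.
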
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 5d8a2a56fd39..932408dca86d 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -578,6 +578,7 @@ out:
578static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 578static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
579{ 579{
580 int err; 580 int err;
581 int skb_len;
581 582
582 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 583 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
583 number of warnings when compiling with -W --ANK 584 number of warnings when compiling with -W --ANK
@@ -592,22 +593,12 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
592 if (err) 593 if (err)
593 goto out; 594 goto out;
594 595
596 skb_len = skb->len;
595 skb_set_owner_r(skb, sk); 597 skb_set_owner_r(skb, sk);
596 skb_queue_tail(queue, skb); 598 skb_queue_tail(queue, skb);
597 599
598 /* This code only runs from BH or BH protected context. 600 if (!sock_flag(sk, SOCK_DEAD))
599 * Therefore the plain read_lock is ok here. -DaveM 601 sk->sk_data_ready(sk, skb_len);
600 */
601 read_lock(&sk->sk_callback_lock);
602 if (!sock_flag(sk, SOCK_DEAD)) {
603 struct socket *sock = sk->sk_socket;
604 wake_up_interruptible(sk->sk_sleep);
605 if (sock && sock->fasync_list &&
606 !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
607 __kill_fasync(sock->fasync_list, sig,
608 (sig == SIGURG) ? POLL_PRI : POLL_IN);
609 }
610 read_unlock(&sk->sk_callback_lock);
611out: 602out:
612 return err; 603 return err;
613} 604}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 2013c25b7f5a..da04f459337e 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -382,7 +382,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
382{ 382{
383 struct dn_skb_cb *cb = DN_SKB_CB(skb); 383 struct dn_skb_cb *cb = DN_SKB_CB(skb);
384 struct dn_scp *scp = DN_SK(sk); 384 struct dn_scp *scp = DN_SK(sk);
385 struct sk_buff *skb2, *list, *ack = NULL; 385 struct sk_buff *skb2, *n, *ack = NULL;
386 int wakeup = 0; 386 int wakeup = 0;
387 int try_retrans = 0; 387 int try_retrans = 0;
388 unsigned long reftime = cb->stamp; 388 unsigned long reftime = cb->stamp;
@@ -390,9 +390,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
390 unsigned short xmit_count; 390 unsigned short xmit_count;
391 unsigned short segnum; 391 unsigned short segnum;
392 392
393 skb2 = q->next; 393 skb_queue_walk_safe(q, skb2, n) {
394 list = (struct sk_buff *)q;
395 while(list != skb2) {
396 struct dn_skb_cb *cb2 = DN_SKB_CB(skb2); 394 struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);
397 395
398 if (dn_before_or_equal(cb2->segnum, acknum)) 396 if (dn_before_or_equal(cb2->segnum, acknum))
@@ -400,8 +398,6 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
400 398
401 /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */ 399 /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */
402 400
403 skb2 = skb2->next;
404
405 if (ack == NULL) 401 if (ack == NULL)
406 continue; 402 continue;
407 403
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 14fbca55e908..72495f25269f 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -115,7 +115,7 @@ static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
115} 115}
116 116
117static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 117static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
118 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 118 struct fib_rule_hdr *frh,
119 struct nlattr **tb) 119 struct nlattr **tb)
120{ 120{
121 int err = -EINVAL; 121 int err = -EINVAL;
@@ -192,7 +192,7 @@ unsigned dnet_addr_type(__le16 addr)
192} 192}
193 193
194static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 194static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
195 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 195 struct fib_rule_hdr *frh)
196{ 196{
197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule; 197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
198 198
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ed131181215d..2175e6d5cc8d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
67 return -ENETDOWN; 67 return -ENETDOWN;
68 68
69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { 69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
70 err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN); 70 err = dev_unicast_add(master, dev->dev_addr);
71 if (err < 0) 71 if (err < 0)
72 goto out; 72 goto out;
73 } 73 }
@@ -90,7 +90,7 @@ clear_allmulti:
90 dev_set_allmulti(master, -1); 90 dev_set_allmulti(master, -1);
91del_unicast: 91del_unicast:
92 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 92 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
93 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 93 dev_unicast_delete(master, dev->dev_addr);
94out: 94out:
95 return err; 95 return err;
96} 96}
@@ -108,7 +108,7 @@ static int dsa_slave_close(struct net_device *dev)
108 dev_set_promiscuity(master, -1); 108 dev_set_promiscuity(master, -1);
109 109
110 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 110 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
111 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 111 dev_unicast_delete(master, dev->dev_addr);
112 112
113 return 0; 113 return 0;
114} 114}
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
147 goto out; 147 goto out;
148 148
149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) { 149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
150 err = dev_unicast_add(master, addr->sa_data, ETH_ALEN); 150 err = dev_unicast_add(master, addr->sa_data);
151 if (err < 0) 151 if (err < 0)
152 return err; 152 return err;
153 } 153 }
154 154
155 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 155 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
156 dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); 156 dev_unicast_delete(master, dev->dev_addr);
157 157
158out: 158out:
159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 6f479fa522c3..8121bf0029e3 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -901,15 +901,10 @@ static void aun_tx_ack(unsigned long seq, int result)
901 struct ec_cb *eb; 901 struct ec_cb *eb;
902 902
903 spin_lock_irqsave(&aun_queue_lock, flags); 903 spin_lock_irqsave(&aun_queue_lock, flags);
904 skb = skb_peek(&aun_queue); 904 skb_queue_walk(&aun_queue, skb) {
905 while (skb && skb != (struct sk_buff *)&aun_queue)
906 {
907 struct sk_buff *newskb = skb->next;
908 eb = (struct ec_cb *)&skb->cb; 905 eb = (struct ec_cb *)&skb->cb;
909 if (eb->seq == seq) 906 if (eb->seq == seq)
910 goto foundit; 907 goto foundit;
911
912 skb = newskb;
913 } 908 }
914 spin_unlock_irqrestore(&aun_queue_lock, flags); 909 spin_unlock_irqrestore(&aun_queue_lock, flags);
915 printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); 910 printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
@@ -982,23 +977,18 @@ static void aun_data_available(struct sock *sk, int slen)
982 977
983static void ab_cleanup(unsigned long h) 978static void ab_cleanup(unsigned long h)
984{ 979{
985 struct sk_buff *skb; 980 struct sk_buff *skb, *n;
986 unsigned long flags; 981 unsigned long flags;
987 982
988 spin_lock_irqsave(&aun_queue_lock, flags); 983 spin_lock_irqsave(&aun_queue_lock, flags);
989 skb = skb_peek(&aun_queue); 984 skb_queue_walk_safe(&aun_queue, skb, n) {
990 while (skb && skb != (struct sk_buff *)&aun_queue)
991 {
992 struct sk_buff *newskb = skb->next;
993 struct ec_cb *eb = (struct ec_cb *)&skb->cb; 985 struct ec_cb *eb = (struct ec_cb *)&skb->cb;
994 if ((jiffies - eb->start) > eb->timeout) 986 if ((jiffies - eb->start) > eb->timeout) {
995 {
996 tx_result(skb->sk, eb->cookie, 987 tx_result(skb->sk, eb->cookie,
997 ECTYPE_TRANSMIT_NOT_PRESENT); 988 ECTYPE_TRANSMIT_NOT_PRESENT);
998 skb_unlink(skb, &aun_queue); 989 skb_unlink(skb, &aun_queue);
999 kfree_skb(skb); 990 kfree_skb(skb);
1000 } 991 }
1001 skb = newskb;
1002 } 992 }
1003 spin_unlock_irqrestore(&aun_queue_lock, flags); 993 spin_unlock_irqrestore(&aun_queue_lock, flags);
1004 994
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 280352aba403..5a883affecd3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -337,11 +337,6 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
337void ether_setup(struct net_device *dev) 337void ether_setup(struct net_device *dev)
338{ 338{
339 dev->header_ops = &eth_header_ops; 339 dev->header_ops = &eth_header_ops;
340#ifdef CONFIG_COMPAT_NET_DEV_OPS
341 dev->change_mtu = eth_change_mtu;
342 dev->set_mac_address = eth_mac_addr;
343 dev->validate_addr = eth_validate_addr;
344#endif
345 dev->type = ARPHRD_ETHER; 340 dev->type = ARPHRD_ETHER;
346 dev->hard_header_len = ETH_HLEN; 341 dev->hard_header_len = ETH_HLEN;
347 dev->mtu = ETH_DATA_LEN; 342 dev->mtu = ETH_DATA_LEN;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7f03373b8c07..d87362178588 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -375,6 +375,7 @@ lookup_protocol:
375 inet->uc_ttl = -1; 375 inet->uc_ttl = -1;
376 inet->mc_loop = 1; 376 inet->mc_loop = 1;
377 inet->mc_ttl = 1; 377 inet->mc_ttl = 1;
378 inet->mc_all = 1;
378 inet->mc_index = 0; 379 inet->mc_index = 0;
379 inet->mc_list = NULL; 380 inet->mc_list = NULL;
380 381
@@ -1003,8 +1004,6 @@ void inet_register_protosw(struct inet_protosw *p)
1003out: 1004out:
1004 spin_unlock_bh(&inetsw_lock); 1005 spin_unlock_bh(&inetsw_lock);
1005 1006
1006 synchronize_net();
1007
1008 return; 1007 return;
1009 1008
1010out_permanent: 1009out_permanent:
@@ -1248,13 +1247,20 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1248 struct sk_buff **pp = NULL; 1247 struct sk_buff **pp = NULL;
1249 struct sk_buff *p; 1248 struct sk_buff *p;
1250 struct iphdr *iph; 1249 struct iphdr *iph;
1250 unsigned int hlen;
1251 unsigned int off;
1252 unsigned int id;
1251 int flush = 1; 1253 int flush = 1;
1252 int proto; 1254 int proto;
1253 int id;
1254 1255
1255 iph = skb_gro_header(skb, sizeof(*iph)); 1256 off = skb_gro_offset(skb);
1256 if (unlikely(!iph)) 1257 hlen = off + sizeof(*iph);
1257 goto out; 1258 iph = skb_gro_header_fast(skb, off);
1259 if (skb_gro_header_hard(skb, hlen)) {
1260 iph = skb_gro_header_slow(skb, hlen, off);
1261 if (unlikely(!iph))
1262 goto out;
1263 }
1258 1264
1259 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1265 proto = iph->protocol & (MAX_INET_PROTOS - 1);
1260 1266
@@ -1269,9 +1275,9 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1269 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1275 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1270 goto out_unlock; 1276 goto out_unlock;
1271 1277
1272 flush = ntohs(iph->tot_len) != skb_gro_len(skb) || 1278 id = ntohl(*(u32 *)&iph->id);
1273 iph->frag_off != htons(IP_DF); 1279 flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
1274 id = ntohs(iph->id); 1280 id >>= 16;
1275 1281
1276 for (p = *head; p; p = p->next) { 1282 for (p = *head; p; p = p->next) {
1277 struct iphdr *iph2; 1283 struct iphdr *iph2;
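
Note: the new inet_gro_receive() flush computation folds two 16-bit checks into word operations. Word 0 of the IPv4 header (version/ihl, tos, tot_len) is XORed against the GRO length so its low 16 bits test tot_len; word 1 (id, frag_off) is XORed against IP_DF so its low 16 bits test the fragment field; the cast to u16 discards the high halves, and id >>= 16 keeps the IP id for the per-flow comparison. Below is a small standalone check of that trick using a hand-built 8-byte header rather than kernel types; it is a sketch of the arithmetic, not the kernel routine.

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IP_DF 0x4000u

/* Word 0 = version/ihl, tos, tot_len; word 1 = id, frag_off.  XOR against
 * the expected values and truncate to 16 bits: the result is zero exactly
 * when tot_len matches and frag_off == IP_DF, while the id survives in the
 * upper half of word 1 for later use. */
static uint16_t flush_check(const uint8_t *hdr, uint32_t expect_len)
{
        uint32_t w0, w1;

        memcpy(&w0, hdr, 4);
        memcpy(&w1, hdr + 4, 4);
        w0 = ntohl(w0);                 /* tot_len in the low 16 bits */
        w1 = ntohl(w1);                 /* id high, frag_off low */

        return (uint16_t)((w0 ^ expect_len) | (w1 ^ IP_DF));
}

int main(void)
{
        uint8_t hdr[8] = { 0x45, 0x00, 0x00, 0x54,   /* tot_len = 0x0054 */
                           0x12, 0x34, 0x40, 0x00 }; /* id, frag_off = DF */

        assert(flush_check(hdr, 0x54) == 0);         /* length matches, DF set */
        assert(flush_check(hdr, 0x55) != 0);         /* length mismatch */
        hdr[6] = 0x20;                               /* MF instead of DF */
        assert(flush_check(hdr, 0x54) != 0);
        printf("flush word trick behaves as expected\n");
        return 0;
}
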
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 126bb911880f..3863c3a4223f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1347,7 +1347,8 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1347 struct net *net = ctl->extra2; 1347 struct net *net = ctl->extra2;
1348 1348
1349 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1349 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1350 rtnl_lock(); 1350 if (!rtnl_trylock())
1351 return restart_syscall();
1351 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1352 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1352 inet_forward_change(net); 1353 inet_forward_change(net);
1353 } else if (*valp) { 1354 } else if (*valp) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cafcc49d0993..e2f950592566 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -40,7 +40,6 @@
40#include <net/route.h> 40#include <net/route.h>
41#include <net/tcp.h> 41#include <net/tcp.h>
42#include <net/sock.h> 42#include <net/sock.h>
43#include <net/icmp.h>
44#include <net/arp.h> 43#include <net/arp.h>
45#include <net/ip_fib.h> 44#include <net/ip_fib.h>
46#include <net/rtnetlink.h> 45#include <net/rtnetlink.h>
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index ded8c44fb848..ecd39454235c 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -263,7 +263,6 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
263 263
264 err = fib_semantic_match(&f->fn_alias, 264 err = fib_semantic_match(&f->fn_alias,
265 flp, res, 265 flp, res,
266 f->fn_key, fz->fz_mask,
267 fz->fz_order); 266 fz->fz_order);
268 if (err <= 0) 267 if (err <= 0)
269 goto out; 268 goto out;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 2c1623d2768b..637b133973bd 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -22,8 +22,7 @@ struct fib_alias {
22/* Exported by fib_semantics.c */ 22/* Exported by fib_semantics.c */
23extern int fib_semantic_match(struct list_head *head, 23extern int fib_semantic_match(struct list_head *head,
24 const struct flowi *flp, 24 const struct flowi *flp,
25 struct fib_result *res, __be32 zone, __be32 mask, 25 struct fib_result *res, int prefixlen);
26 int prefixlen);
27extern void fib_release_info(struct fib_info *); 26extern void fib_release_info(struct fib_info *);
28extern struct fib_info *fib_create_info(struct fib_config *cfg); 27extern struct fib_info *fib_create_info(struct fib_config *cfg);
29extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); 28extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6080d7120821..92d9d97ec5e3 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -134,7 +134,7 @@ static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
134}; 134};
135 135
136static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 136static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
137 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 137 struct fib_rule_hdr *frh,
138 struct nlattr **tb) 138 struct nlattr **tb)
139{ 139{
140 struct net *net = sock_net(skb->sk); 140 struct net *net = sock_net(skb->sk);
@@ -209,7 +209,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
209} 209}
210 210
211static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 211static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
212 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 212 struct fib_rule_hdr *frh)
213{ 213{
214 struct fib4_rule *rule4 = (struct fib4_rule *) rule; 214 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
215 215
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f831df500907..9b096d6ff3f2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -866,8 +866,7 @@ failure:
866 866
867/* Note! fib_semantic_match intentionally uses RCU list functions. */ 867/* Note! fib_semantic_match intentionally uses RCU list functions. */
868int fib_semantic_match(struct list_head *head, const struct flowi *flp, 868int fib_semantic_match(struct list_head *head, const struct flowi *flp,
869 struct fib_result *res, __be32 zone, __be32 mask, 869 struct fib_result *res, int prefixlen)
870 int prefixlen)
871{ 870{
872 struct fib_alias *fa; 871 struct fib_alias *fa;
873 int nh_sel = 0; 872 int nh_sel = 0;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 33c7c85dfe40..538d2a9a5115 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1351,8 +1351,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
1351 if (l->key != (key & ntohl(mask))) 1351 if (l->key != (key & ntohl(mask)))
1352 continue; 1352 continue;
1353 1353
1354 err = fib_semantic_match(&li->falh, flp, res, 1354 err = fib_semantic_match(&li->falh, flp, res, plen);
1355 htonl(l->key), mask, plen);
1356 1355
1357#ifdef CONFIG_IP_FIB_TRIE_STATS 1356#ifdef CONFIG_IP_FIB_TRIE_STATS
1358 if (err <= 0) 1357 if (err <= 0)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9eb6219af615..e6058a503796 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2196,7 +2196,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2196 break; 2196 break;
2197 } 2197 }
2198 if (!pmc) 2198 if (!pmc)
2199 return 1; 2199 return inet->mc_all;
2200 psl = pmc->sflist; 2200 psl = pmc->sflist;
2201 if (!psl) 2201 if (!psl)
2202 return pmc->sfmode == MCAST_EXCLUDE; 2202 return pmc->sfmode == MCAST_EXCLUDE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 588a7796e3e3..b0b273503e2a 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -198,8 +198,6 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
198 tmo = 0; 198 tmo = 0;
199 199
200 r->idiag_family = tw->tw_family; 200 r->idiag_family = tw->tw_family;
201 r->idiag_state = tw->tw_state;
202 r->idiag_timer = 0;
203 r->idiag_retrans = 0; 201 r->idiag_retrans = 0;
204 r->id.idiag_if = tw->tw_bound_dev_if; 202 r->id.idiag_if = tw->tw_bound_dev_if;
205 r->id.idiag_cookie[0] = (u32)(unsigned long)tw; 203 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 8554d0ea1719..68a8d892c711 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -49,19 +49,22 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
49 inet_twsk_put(tw); 49 inet_twsk_put(tw);
50} 50}
51 51
52void inet_twsk_put(struct inet_timewait_sock *tw) 52static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
53{ 53{
54 if (atomic_dec_and_test(&tw->tw_refcnt)) { 54 struct module *owner = tw->tw_prot->owner;
55 struct module *owner = tw->tw_prot->owner; 55 twsk_destructor((struct sock *)tw);
56 twsk_destructor((struct sock *)tw);
57#ifdef SOCK_REFCNT_DEBUG 56#ifdef SOCK_REFCNT_DEBUG
58 printk(KERN_DEBUG "%s timewait_sock %p released\n", 57 pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
59 tw->tw_prot->name, tw);
60#endif 58#endif
61 release_net(twsk_net(tw)); 59 release_net(twsk_net(tw));
62 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); 60 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
63 module_put(owner); 61 module_put(owner);
64 } 62}
63
64void inet_twsk_put(struct inet_timewait_sock *tw)
65{
66 if (atomic_dec_and_test(&tw->tw_refcnt))
67 inet_twsk_free(tw);
65} 68}
66EXPORT_SYMBOL_GPL(inet_twsk_put); 69EXPORT_SYMBOL_GPL(inet_twsk_put);
67 70
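
Note: splitting inet_twsk_put() keeps the hot path, an atomic decrement that almost never reaches zero, small enough to stay inline at call sites, while the rarely-run teardown moves into a noinline helper. The following is a hedged userspace sketch of the same decrement-and-test-then-free shape using C11 atomics; obj_get/obj_put are illustrative names, not kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;
        char name[16];
};

/* Cold path: runs only when the last reference goes away.  Keeping it in a
 * separate function (the kernel adds noinline) keeps obj_put() tiny. */
static void obj_free(struct obj *o)
{
        printf("%s released\n", o->name);
        free(o);
}

static void obj_put(struct obj *o)
{
        /* mirrors atomic_dec_and_test(): true only for the 1 -> 0 transition */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                obj_free(o);
}

static void obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcnt, 1);
        snprintf(o->name, sizeof(o->name), "tw-demo");

        obj_get(o);     /* second reference */
        obj_put(o);     /* drops to 1, nothing freed */
        obj_put(o);     /* drops to 0, obj_free() runs */
        return 0;
}
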
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e62510d5ea5a..77436e2732eb 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1238,6 +1238,7 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1238 dev->iflink = 0; 1238 dev->iflink = 0;
1239 dev->addr_len = 4; 1239 dev->addr_len = 4;
1240 dev->features |= NETIF_F_NETNS_LOCAL; 1240 dev->features |= NETIF_F_NETNS_LOCAL;
1241 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1241} 1242}
1242 1243
1243static int ipgre_tunnel_init(struct net_device *dev) 1244static int ipgre_tunnel_init(struct net_device *dev)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 1a58a6fa1dc0..40f6206b2aa9 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -358,10 +358,12 @@ static int ip_rcv_finish(struct sk_buff *skb)
358 goto drop; 358 goto drop;
359 359
360 rt = skb->rtable; 360 rt = skb->rtable;
361 if (rt->rt_type == RTN_MULTICAST) 361 if (rt->rt_type == RTN_MULTICAST) {
362 IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCASTPKTS); 362 IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST,
363 else if (rt->rt_type == RTN_BROADCAST) 363 skb->len);
364 IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCASTPKTS); 364 } else if (rt->rt_type == RTN_BROADCAST)
365 IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST,
366 skb->len);
365 367
366 return dst_input(skb); 368 return dst_input(skb);
367 369
@@ -384,7 +386,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
384 if (skb->pkt_type == PACKET_OTHERHOST) 386 if (skb->pkt_type == PACKET_OTHERHOST)
385 goto drop; 387 goto drop;
386 388
387 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INRECEIVES); 389
390 IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
388 391
389 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 392 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
390 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); 393 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3e7e910c7c0f..ea19c37ccc0c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -181,10 +181,10 @@ static inline int ip_finish_output2(struct sk_buff *skb)
181 struct net_device *dev = dst->dev; 181 struct net_device *dev = dst->dev;
182 unsigned int hh_len = LL_RESERVED_SPACE(dev); 182 unsigned int hh_len = LL_RESERVED_SPACE(dev);
183 183
184 if (rt->rt_type == RTN_MULTICAST) 184 if (rt->rt_type == RTN_MULTICAST) {
185 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS); 185 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
186 else if (rt->rt_type == RTN_BROADCAST) 186 } else if (rt->rt_type == RTN_BROADCAST)
187 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS); 187 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
188 188
189 /* Be paranoid, rather than too clever. */ 189 /* Be paranoid, rather than too clever. */
190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -244,7 +244,7 @@ int ip_mc_output(struct sk_buff *skb)
244 /* 244 /*
245 * If the indicated interface is up and running, send the packet. 245 * If the indicated interface is up and running, send the packet.
246 */ 246 */
247 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); 247 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
248 248
249 skb->dev = dev; 249 skb->dev = dev;
250 skb->protocol = htons(ETH_P_IP); 250 skb->protocol = htons(ETH_P_IP);
@@ -298,7 +298,7 @@ int ip_output(struct sk_buff *skb)
298{ 298{
299 struct net_device *dev = skb->dst->dev; 299 struct net_device *dev = skb->dst->dev;
300 300
301 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); 301 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
302 302
303 skb->dev = dev; 303 skb->dev = dev;
304 skb->protocol = htons(ETH_P_IP); 304 skb->protocol = htons(ETH_P_IP);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 43c05854d752..cb49936856e0 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -157,38 +157,39 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
157 /* Ordered by supposed usage frequency */ 157 /* Ordered by supposed usage frequency */
158 if (flags & 1) 158 if (flags & 1)
159 ip_cmsg_recv_pktinfo(msg, skb); 159 ip_cmsg_recv_pktinfo(msg, skb);
160 if ((flags>>=1) == 0) 160 if ((flags >>= 1) == 0)
161 return; 161 return;
162 162
163 if (flags & 1) 163 if (flags & 1)
164 ip_cmsg_recv_ttl(msg, skb); 164 ip_cmsg_recv_ttl(msg, skb);
165 if ((flags>>=1) == 0) 165 if ((flags >>= 1) == 0)
166 return; 166 return;
167 167
168 if (flags & 1) 168 if (flags & 1)
169 ip_cmsg_recv_tos(msg, skb); 169 ip_cmsg_recv_tos(msg, skb);
170 if ((flags>>=1) == 0) 170 if ((flags >>= 1) == 0)
171 return; 171 return;
172 172
173 if (flags & 1) 173 if (flags & 1)
174 ip_cmsg_recv_opts(msg, skb); 174 ip_cmsg_recv_opts(msg, skb);
175 if ((flags>>=1) == 0) 175 if ((flags >>= 1) == 0)
176 return; 176 return;
177 177
178 if (flags & 1) 178 if (flags & 1)
179 ip_cmsg_recv_retopts(msg, skb); 179 ip_cmsg_recv_retopts(msg, skb);
180 if ((flags>>=1) == 0) 180 if ((flags >>= 1) == 0)
181 return; 181 return;
182 182
183 if (flags & 1) 183 if (flags & 1)
184 ip_cmsg_recv_security(msg, skb); 184 ip_cmsg_recv_security(msg, skb);
185 185
186 if ((flags>>=1) == 0) 186 if ((flags >>= 1) == 0)
187 return; 187 return;
188 if (flags & 1) 188 if (flags & 1)
189 ip_cmsg_recv_dstaddr(msg, skb); 189 ip_cmsg_recv_dstaddr(msg, skb);
190 190
191} 191}
192EXPORT_SYMBOL(ip_cmsg_recv);
192 193
193int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) 194int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
194{ 195{
@@ -203,7 +204,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
203 switch (cmsg->cmsg_type) { 204 switch (cmsg->cmsg_type) {
204 case IP_RETOPTS: 205 case IP_RETOPTS:
205 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 206 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
206 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); 207 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
208 err < 40 ? err : 40);
207 if (err) 209 if (err)
208 return err; 210 return err;
209 break; 211 break;
@@ -238,7 +240,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
238struct ip_ra_chain *ip_ra_chain; 240struct ip_ra_chain *ip_ra_chain;
239DEFINE_RWLOCK(ip_ra_lock); 241DEFINE_RWLOCK(ip_ra_lock);
240 242
241int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) 243int ip_ra_control(struct sock *sk, unsigned char on,
244 void (*destructor)(struct sock *))
242{ 245{
243 struct ip_ra_chain *ra, *new_ra, **rap; 246 struct ip_ra_chain *ra, *new_ra, **rap;
244 247
@@ -248,7 +251,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
248 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; 251 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
249 252
250 write_lock_bh(&ip_ra_lock); 253 write_lock_bh(&ip_ra_lock);
251 for (rap = &ip_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { 254 for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
252 if (ra->sk == sk) { 255 if (ra->sk == sk) {
253 if (on) { 256 if (on) {
254 write_unlock_bh(&ip_ra_lock); 257 write_unlock_bh(&ip_ra_lock);
@@ -416,7 +419,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
416 /* Reset and regenerate socket error */ 419 /* Reset and regenerate socket error */
417 spin_lock_bh(&sk->sk_error_queue.lock); 420 spin_lock_bh(&sk->sk_error_queue.lock);
418 sk->sk_err = 0; 421 sk->sk_err = 0;
419 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { 422 skb2 = skb_peek(&sk->sk_error_queue);
423 if (skb2 != NULL) {
420 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; 424 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
421 spin_unlock_bh(&sk->sk_error_queue.lock); 425 spin_unlock_bh(&sk->sk_error_queue.lock);
422 sk->sk_error_report(sk); 426 sk->sk_error_report(sk);
@@ -431,8 +435,8 @@ out:
431 435
432 436
433/* 437/*
434 * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on 438 * Socket option code for IP. This is the end of the line after any
435 * an IP socket. 439 * TCP,UDP etc options on an IP socket.
436 */ 440 */
437 441
438static int do_ip_setsockopt(struct sock *sk, int level, 442static int do_ip_setsockopt(struct sock *sk, int level,
@@ -449,6 +453,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
449 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
450 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
451 optname == IP_MULTICAST_TTL || 455 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL ||
452 optname == IP_MULTICAST_LOOP || 457 optname == IP_MULTICAST_LOOP ||
453 optname == IP_RECVORIGDSTADDR) { 458 optname == IP_RECVORIGDSTADDR) {
454 if (optlen >= sizeof(int)) { 459 if (optlen >= sizeof(int)) {
@@ -474,7 +479,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
474 switch (optname) { 479 switch (optname) {
475 case IP_OPTIONS: 480 case IP_OPTIONS:
476 { 481 {
477 struct ip_options * opt = NULL; 482 struct ip_options *opt = NULL;
478 if (optlen > 40 || optlen < 0) 483 if (optlen > 40 || optlen < 0)
479 goto e_inval; 484 goto e_inval;
480 err = ip_options_get_from_user(sock_net(sk), &opt, 485 err = ip_options_get_from_user(sock_net(sk), &opt,
@@ -556,9 +561,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
556 } 561 }
557 break; 562 break;
558 case IP_TTL: 563 case IP_TTL:
559 if (optlen<1) 564 if (optlen < 1)
560 goto e_inval; 565 goto e_inval;
561 if (val != -1 && (val < 1 || val>255)) 566 if (val != -1 && (val < 0 || val > 255))
562 goto e_inval; 567 goto e_inval;
563 inet->uc_ttl = val; 568 inet->uc_ttl = val;
564 break; 569 break;
@@ -570,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
570 inet->hdrincl = val ? 1 : 0; 575 inet->hdrincl = val ? 1 : 0;
571 break; 576 break;
572 case IP_MTU_DISCOVER: 577 case IP_MTU_DISCOVER:
573 if (val<0 || val>3) 578 if (val < 0 || val > 3)
574 goto e_inval; 579 goto e_inval;
575 inet->pmtudisc = val; 580 inet->pmtudisc = val;
576 break; 581 break;
@@ -582,7 +587,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
582 case IP_MULTICAST_TTL: 587 case IP_MULTICAST_TTL:
583 if (sk->sk_type == SOCK_STREAM) 588 if (sk->sk_type == SOCK_STREAM)
584 goto e_inval; 589 goto e_inval;
585 if (optlen<1) 590 if (optlen < 1)
586 goto e_inval; 591 goto e_inval;
587 if (val == -1) 592 if (val == -1)
588 val = 1; 593 val = 1;
@@ -591,7 +596,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
591 inet->mc_ttl = val; 596 inet->mc_ttl = val;
592 break; 597 break;
593 case IP_MULTICAST_LOOP: 598 case IP_MULTICAST_LOOP:
594 if (optlen<1) 599 if (optlen < 1)
595 goto e_inval; 600 goto e_inval;
596 inet->mc_loop = !!val; 601 inet->mc_loop = !!val;
597 break; 602 break;
@@ -613,7 +618,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
613 } else { 618 } else {
614 memset(&mreq, 0, sizeof(mreq)); 619 memset(&mreq, 0, sizeof(mreq));
615 if (optlen >= sizeof(struct in_addr) && 620 if (optlen >= sizeof(struct in_addr) &&
616 copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) 621 copy_from_user(&mreq.imr_address, optval,
622 sizeof(struct in_addr)))
617 break; 623 break;
618 } 624 }
619 625
@@ -677,7 +683,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
677 } 683 }
678 case IP_MSFILTER: 684 case IP_MSFILTER:
679 { 685 {
680 extern int sysctl_igmp_max_msf;
681 struct ip_msfilter *msf; 686 struct ip_msfilter *msf;
682 687
683 if (optlen < IP_MSFILTER_SIZE(0)) 688 if (optlen < IP_MSFILTER_SIZE(0))
@@ -831,7 +836,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
831 } 836 }
832 case MCAST_MSFILTER: 837 case MCAST_MSFILTER:
833 { 838 {
834 extern int sysctl_igmp_max_msf;
835 struct sockaddr_in *psin; 839 struct sockaddr_in *psin;
836 struct ip_msfilter *msf = NULL; 840 struct ip_msfilter *msf = NULL;
837 struct group_filter *gsf = NULL; 841 struct group_filter *gsf = NULL;
@@ -849,9 +853,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
849 break; 853 break;
850 } 854 }
851 err = -EFAULT; 855 err = -EFAULT;
852 if (copy_from_user(gsf, optval, optlen)) { 856 if (copy_from_user(gsf, optval, optlen))
853 goto mc_msf_out; 857 goto mc_msf_out;
854 } 858
855 /* numsrc >= (4G-140)/128 overflow in 32 bits */ 859 /* numsrc >= (4G-140)/128 overflow in 32 bits */
856 if (gsf->gf_numsrc >= 0x1ffffff || 860 if (gsf->gf_numsrc >= 0x1ffffff ||
857 gsf->gf_numsrc > sysctl_igmp_max_msf) { 861 gsf->gf_numsrc > sysctl_igmp_max_msf) {
@@ -879,7 +883,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
879 msf->imsf_fmode = gsf->gf_fmode; 883 msf->imsf_fmode = gsf->gf_fmode;
880 msf->imsf_numsrc = gsf->gf_numsrc; 884 msf->imsf_numsrc = gsf->gf_numsrc;
881 err = -EADDRNOTAVAIL; 885 err = -EADDRNOTAVAIL;
882 for (i=0; i<gsf->gf_numsrc; ++i) { 886 for (i = 0; i < gsf->gf_numsrc; ++i) {
883 psin = (struct sockaddr_in *)&gsf->gf_slist[i]; 887 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
884 888
885 if (psin->sin_family != AF_INET) 889 if (psin->sin_family != AF_INET)
@@ -890,17 +894,24 @@ static int do_ip_setsockopt(struct sock *sk, int level,
890 gsf = NULL; 894 gsf = NULL;
891 895
892 err = ip_mc_msfilter(sk, msf, ifindex); 896 err = ip_mc_msfilter(sk, msf, ifindex);
893 mc_msf_out: 897mc_msf_out:
894 kfree(msf); 898 kfree(msf);
895 kfree(gsf); 899 kfree(gsf);
896 break; 900 break;
897 } 901 }
902 case IP_MULTICAST_ALL:
903 if (optlen < 1)
904 goto e_inval;
905 if (val != 0 && val != 1)
906 goto e_inval;
907 inet->mc_all = val;
908 break;
898 case IP_ROUTER_ALERT: 909 case IP_ROUTER_ALERT:
899 err = ip_ra_control(sk, val ? 1 : 0, NULL); 910 err = ip_ra_control(sk, val ? 1 : 0, NULL);
900 break; 911 break;
901 912
902 case IP_FREEBIND: 913 case IP_FREEBIND:
903 if (optlen<1) 914 if (optlen < 1)
904 goto e_inval; 915 goto e_inval;
905 inet->freebind = !!val; 916 inet->freebind = !!val;
906 break; 917 break;
@@ -957,6 +968,7 @@ int ip_setsockopt(struct sock *sk, int level,
957#endif 968#endif
958 return err; 969 return err;
959} 970}
971EXPORT_SYMBOL(ip_setsockopt);
960 972
961#ifdef CONFIG_COMPAT 973#ifdef CONFIG_COMPAT
962int compat_ip_setsockopt(struct sock *sk, int level, int optname, 974int compat_ip_setsockopt(struct sock *sk, int level, int optname,
@@ -986,13 +998,12 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
986#endif 998#endif
987 return err; 999 return err;
988} 1000}
989
990EXPORT_SYMBOL(compat_ip_setsockopt); 1001EXPORT_SYMBOL(compat_ip_setsockopt);
991#endif 1002#endif
992 1003
993/* 1004/*
994 * Get the options. Note for future reference. The GET of IP options gets the 1005 * Get the options. Note for future reference. The GET of IP options gets
995 * _received_ ones. The set sets the _sent_ ones. 1006 * the _received_ ones. The set sets the _sent_ ones.
996 */ 1007 */
997 1008
998static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1009static int do_ip_getsockopt(struct sock *sk, int level, int optname,
@@ -1143,10 +1154,14 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1143 return -EFAULT; 1154 return -EFAULT;
1144 } 1155 }
1145 err = ip_mc_gsfget(sk, &gsf, 1156 err = ip_mc_gsfget(sk, &gsf,
1146 (struct group_filter __user *)optval, optlen); 1157 (struct group_filter __user *)optval,
1158 optlen);
1147 release_sock(sk); 1159 release_sock(sk);
1148 return err; 1160 return err;
1149 } 1161 }
1162 case IP_MULTICAST_ALL:
1163 val = inet->mc_all;
1164 break;
1150 case IP_PKTOPTIONS: 1165 case IP_PKTOPTIONS:
1151 { 1166 {
1152 struct msghdr msg; 1167 struct msghdr msg;
@@ -1187,7 +1202,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1187 } 1202 }
1188 release_sock(sk); 1203 release_sock(sk);
1189 1204
1190 if (len < sizeof(int) && len > 0 && val>=0 && val<=255) { 1205 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1191 unsigned char ucval = (unsigned char)val; 1206 unsigned char ucval = (unsigned char)val;
1192 len = 1; 1207 len = 1;
1193 if (put_user(len, optlen)) 1208 if (put_user(len, optlen))
@@ -1230,6 +1245,7 @@ int ip_getsockopt(struct sock *sk, int level,
1230#endif 1245#endif
1231 return err; 1246 return err;
1232} 1247}
1248EXPORT_SYMBOL(ip_getsockopt);
1233 1249
1234#ifdef CONFIG_COMPAT 1250#ifdef CONFIG_COMPAT
1235int compat_ip_getsockopt(struct sock *sk, int level, int optname, 1251int compat_ip_getsockopt(struct sock *sk, int level, int optname,
@@ -1262,11 +1278,5 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1262#endif 1278#endif
1263 return err; 1279 return err;
1264} 1280}
1265
1266EXPORT_SYMBOL(compat_ip_getsockopt); 1281EXPORT_SYMBOL(compat_ip_getsockopt);
1267#endif 1282#endif
1268
1269EXPORT_SYMBOL(ip_cmsg_recv);
1270
1271EXPORT_SYMBOL(ip_getsockopt);
1272EXPORT_SYMBOL(ip_setsockopt);
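
Note: together with the igmp.c and af_inet.c hunks, this adds the IP_MULTICAST_ALL socket option. It defaults to 1, which keeps the historical behaviour of delivering datagrams for any group joined on the host; setting it to 0 restricts delivery to groups the socket itself joined. A sketch of how an application might clear it follows; the fallback #define is only for libc headers that predate the option and assumes the value used by this series.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_MULTICAST_ALL
#define IP_MULTICAST_ALL 49     /* value introduced by this kernel series */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int off = 0;

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* Only deliver datagrams for groups this socket explicitly joined
         * (via IP_ADD_MEMBERSHIP), not every group joined on the host. */
        if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_ALL, &off, sizeof(off)) < 0)
                perror("setsockopt(IP_MULTICAST_ALL)");

        close(fd);
        return 0;
}
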
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 88bf051d0cbb..f8d04c256454 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -160,6 +160,9 @@ static char user_dev_name[IFNAMSIZ] __initdata = { 0, };
160/* Protocols supported by available interfaces */ 160/* Protocols supported by available interfaces */
161static int ic_proto_have_if __initdata = 0; 161static int ic_proto_have_if __initdata = 0;
162 162
163/* MTU for boot device */
164static int ic_dev_mtu __initdata = 0;
165
163#ifdef IPCONFIG_DYNAMIC 166#ifdef IPCONFIG_DYNAMIC
164static DEFINE_SPINLOCK(ic_recv_lock); 167static DEFINE_SPINLOCK(ic_recv_lock);
165static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */ 168static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */
@@ -286,7 +289,7 @@ set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
286 sin->sin_port = port; 289 sin->sin_port = port;
287} 290}
288 291
289static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) 292static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
290{ 293{
291 int res; 294 int res;
292 295
@@ -297,6 +300,17 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
297 return res; 300 return res;
298} 301}
299 302
303static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
304{
305 int res;
306
307 mm_segment_t oldfs = get_fs();
308 set_fs(get_ds());
309 res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
310 set_fs(oldfs);
311 return res;
312}
313
300static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) 314static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
301{ 315{
302 int res; 316 int res;
@@ -321,20 +335,31 @@ static int __init ic_setup_if(void)
321 memset(&ir, 0, sizeof(ir)); 335 memset(&ir, 0, sizeof(ir));
322 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); 336 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
323 set_sockaddr(sin, ic_myaddr, 0); 337 set_sockaddr(sin, ic_myaddr, 0);
324 if ((err = ic_dev_ioctl(SIOCSIFADDR, &ir)) < 0) { 338 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
325 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err); 339 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err);
326 return -1; 340 return -1;
327 } 341 }
328 set_sockaddr(sin, ic_netmask, 0); 342 set_sockaddr(sin, ic_netmask, 0);
329 if ((err = ic_dev_ioctl(SIOCSIFNETMASK, &ir)) < 0) { 343 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
330 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err); 344 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err);
331 return -1; 345 return -1;
332 } 346 }
333 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0); 347 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
334 if ((err = ic_dev_ioctl(SIOCSIFBRDADDR, &ir)) < 0) { 348 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
335 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err); 349 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err);
336 return -1; 350 return -1;
337 } 351 }
352 /* Handle the case where we need non-standard MTU on the boot link (a network
353 * using jumbo frames, for instance). If we can't set the mtu, don't error
354 * out, we'll try to muddle along.
355 */
356 if (ic_dev_mtu != 0) {
357 strcpy(ir.ifr_name, ic_dev->name);
358 ir.ifr_mtu = ic_dev_mtu;
359 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
360 printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n",
361 ic_dev_mtu, err);
362 }
338 return 0; 363 return 0;
339} 364}
340 365
@@ -623,6 +648,7 @@ ic_dhcp_init_options(u8 *options)
623 12, /* Host name */ 648 12, /* Host name */
624 15, /* Domain name */ 649 15, /* Domain name */
625 17, /* Boot path */ 650 17, /* Boot path */
651 26, /* MTU */
626 40, /* NIS domain name */ 652 40, /* NIS domain name */
627 }; 653 };
628 654
@@ -798,6 +824,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
798{ 824{
799 u8 servers; 825 u8 servers;
800 int i; 826 int i;
827 u16 mtu;
801 828
802#ifdef IPCONFIG_DEBUG 829#ifdef IPCONFIG_DEBUG
803 u8 *c; 830 u8 *c;
@@ -837,6 +864,10 @@ static void __init ic_do_bootp_ext(u8 *ext)
837 if (!root_server_path[0]) 864 if (!root_server_path[0])
838 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path)); 865 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path));
839 break; 866 break;
867 case 26: /* Interface MTU */
868 memcpy(&mtu, ext+1, sizeof(mtu));
869 ic_dev_mtu = ntohs(mtu);
870 break;
840 case 40: /* NIS Domain name (_not_ DNS) */ 871 case 40: /* NIS Domain name (_not_ DNS) */
841 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN); 872 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN);
842 break; 873 break;
@@ -1403,6 +1434,8 @@ static int __init ip_auto_config(void)
1403 printk(",\n bootserver=%pI4", &ic_servaddr); 1434 printk(",\n bootserver=%pI4", &ic_servaddr);
1404 printk(", rootserver=%pI4", &root_server_addr); 1435 printk(", rootserver=%pI4", &root_server_addr);
1405 printk(", rootpath=%s", root_server_path); 1436 printk(", rootpath=%s", root_server_path);
1437 if (ic_dev_mtu)
1438 printk(", mtu=%d", ic_dev_mtu);
1406 printk("\n"); 1439 printk("\n");
1407#endif /* !SILENT */ 1440#endif /* !SILENT */
1408 1441
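
Note: the ipconfig.c changes request DHCP/BOOTP option 26 (interface MTU), read the two-byte network-order value with memcpy()/ntohs(), and push it to the boot device via SIOCSIFMTU through the new ic_dev_ioctl() wrapper, warning rather than failing if the ioctl is refused. The same ioctl is available from userspace; a sketch follows, with "eth0" and 9000 purely as example values (needs CAP_NET_ADMIN).

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Set an interface MTU the same way the in-kernel DHCP client does once it
 * has parsed option 26. */
static int set_mtu(const char *ifname, int mtu)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0), err;

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_mtu = mtu;

        err = ioctl(fd, SIOCSIFMTU, &ifr);
        close(fd);
        return err;
}

int main(void)
{
        if (set_mtu("eth0", 9000) < 0)
                perror("SIOCSIFMTU");
        return 0;
}
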
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9054139795af..bb2f1b17fbf1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -713,6 +713,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
713 dev->iflink = 0; 713 dev->iflink = 0;
714 dev->addr_len = 4; 714 dev->addr_len = 4;
715 dev->features |= NETIF_F_NETNS_LOCAL; 715 dev->features |= NETIF_F_NETNS_LOCAL;
716 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
716} 717}
717 718
718static void ipip_tunnel_init(struct net_device *dev) 719static void ipip_tunnel_init(struct net_device *dev)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index cf0cdeeb1db0..f25542c48b7d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -90,14 +90,14 @@ static const struct file_operations sockstat_seq_fops = {
90 90
91/* snmp items */ 91/* snmp items */
92static const struct snmp_mib snmp4_ipstats_list[] = { 92static const struct snmp_mib snmp4_ipstats_list[] = {
93 SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES), 93 SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
94 SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS), 94 SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
95 SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS), 95 SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
96 SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), 96 SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
97 SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), 97 SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
98 SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS), 98 SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
99 SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS), 99 SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
100 SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS), 100 SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS),
101 SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS), 101 SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
102 SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), 102 SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
103 SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), 103 SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
@@ -118,6 +118,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
118 SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), 118 SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
119 SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS), 119 SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS),
120 SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS), 120 SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS),
121 SNMP_MIB_ITEM("InOctets", IPSTATS_MIB_INOCTETS),
122 SNMP_MIB_ITEM("OutOctets", IPSTATS_MIB_OUTOCTETS),
123 SNMP_MIB_ITEM("InMcastOctets", IPSTATS_MIB_INMCASTOCTETS),
124 SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
125 SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
126 SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
121 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
122}; 128};
123 129
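
Note: with the IP_UPD_PO_STATS changes, the IpExt block of /proc/net/netstat gains octet counters (InOctets, OutOctets and the multicast/broadcast variants) next to the existing packet counters. A quick way to eyeball them is to dump that block; the sketch below prints the IpExt header and value lines and assumes only that the file keeps its usual "IpExt:" prefix.

#include <stdio.h>
#include <string.h>

/* Print the IpExt header/value pair from /proc/net/netstat, which is where
 * the new InOctets/OutOctets/...Octets counters show up. */
int main(void)
{
        char line[1024];
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f) {
                perror("/proc/net/netstat");
                return 1;
        }

        while (fgets(line, sizeof(line), f))
                if (strncmp(line, "IpExt:", 6) == 0)
                        fputs(line, stdout);    /* header line, then values */

        fclose(f);
        return 0;
}
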
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b35a950d2e06..cd2b97f1b6e1 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -161,13 +161,12 @@ static __u16 const msstab[] = {
161 */ 161 */
162__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) 162__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
163{ 163{
164 struct tcp_sock *tp = tcp_sk(sk);
165 const struct iphdr *iph = ip_hdr(skb); 164 const struct iphdr *iph = ip_hdr(skb);
166 const struct tcphdr *th = tcp_hdr(skb); 165 const struct tcphdr *th = tcp_hdr(skb);
167 int mssind; 166 int mssind;
168 const __u16 mss = *mssp; 167 const __u16 mss = *mssp;
169 168
170 tp->last_synq_overflow = jiffies; 169 tcp_synq_overflow(sk);
171 170
172 /* XXX sort msstab[] by probability? Binary search? */ 171 /* XXX sort msstab[] by probability? Binary search? */
173 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 172 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
@@ -268,7 +267,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
268 if (!sysctl_tcp_syncookies || !th->ack) 267 if (!sysctl_tcp_syncookies || !th->ack)
269 goto out; 268 goto out;
270 269
271 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 270 if (tcp_synq_no_recent_overflow(sk) ||
272 (mss = cookie_check(skb, cookie)) == 0) { 271 (mss = cookie_check(skb, cookie)) == 0) {
273 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); 272 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
274 goto out; 273 goto out;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7a0f0b27bf1f..17b89c523f9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
439 !tp->urg_data || 439 !tp->urg_data ||
440 before(tp->urg_seq, tp->copied_seq) || 440 before(tp->urg_seq, tp->copied_seq) ||
441 !before(tp->urg_seq, tp->rcv_nxt)) { 441 !before(tp->urg_seq, tp->rcv_nxt)) {
442 struct sk_buff *skb;
443
442 answ = tp->rcv_nxt - tp->copied_seq; 444 answ = tp->rcv_nxt - tp->copied_seq;
443 445
444 /* Subtract 1, if FIN is in queue. */ 446 /* Subtract 1, if FIN is in queue. */
445 if (answ && !skb_queue_empty(&sk->sk_receive_queue)) 447 skb = skb_peek_tail(&sk->sk_receive_queue);
446 answ -= 448 if (answ && skb)
447 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; 449 answ -= tcp_hdr(skb)->fin;
448 } else 450 } else
449 answ = tp->urg_seq - tp->copied_seq; 451 answ = tp->urg_seq - tp->copied_seq;
450 release_sock(sk); 452 release_sock(sk);
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1382 1384
1383 /* Next get a buffer. */ 1385 /* Next get a buffer. */
1384 1386
1385 skb = skb_peek(&sk->sk_receive_queue); 1387 skb_queue_walk(&sk->sk_receive_queue, skb) {
1386 do {
1387 if (!skb)
1388 break;
1389
1390 /* Now that we have two receive queues this 1388 /* Now that we have two receive queues this
1391 * shouldn't happen. 1389 * shouldn't happen.
1392 */ 1390 */
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1403 if (tcp_hdr(skb)->fin) 1401 if (tcp_hdr(skb)->fin)
1404 goto found_fin_ok; 1402 goto found_fin_ok;
1405 WARN_ON(!(flags & MSG_PEEK)); 1403 WARN_ON(!(flags & MSG_PEEK));
1406 skb = skb->next; 1404 }
1407 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1408 1405
1409 /* Well, if we have backlog, try to process it now yet. */ 1406 /* Well, if we have backlog, try to process it now yet. */
1410 1407
@@ -2518,20 +2515,30 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2518 unsigned int thlen; 2515 unsigned int thlen;
2519 unsigned int flags; 2516 unsigned int flags;
2520 unsigned int mss = 1; 2517 unsigned int mss = 1;
2518 unsigned int hlen;
2519 unsigned int off;
2521 int flush = 1; 2520 int flush = 1;
2522 int i; 2521 int i;
2523 2522
2524 th = skb_gro_header(skb, sizeof(*th)); 2523 off = skb_gro_offset(skb);
2525 if (unlikely(!th)) 2524 hlen = off + sizeof(*th);
2526 goto out; 2525 th = skb_gro_header_fast(skb, off);
2526 if (skb_gro_header_hard(skb, hlen)) {
2527 th = skb_gro_header_slow(skb, hlen, off);
2528 if (unlikely(!th))
2529 goto out;
2530 }
2527 2531
2528 thlen = th->doff * 4; 2532 thlen = th->doff * 4;
2529 if (thlen < sizeof(*th)) 2533 if (thlen < sizeof(*th))
2530 goto out; 2534 goto out;
2531 2535
2532 th = skb_gro_header(skb, thlen); 2536 hlen = off + thlen;
2533 if (unlikely(!th)) 2537 if (skb_gro_header_hard(skb, hlen)) {
2534 goto out; 2538 th = skb_gro_header_slow(skb, hlen, off);
2539 if (unlikely(!th))
2540 goto out;
2541 }
2535 2542
2536 skb_gro_pull(skb, thlen); 2543 skb_gro_pull(skb, thlen);
2537 2544
@@ -2544,7 +2551,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2544 2551
2545 th2 = tcp_hdr(p); 2552 th2 = tcp_hdr(p);
2546 2553
2547 if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) { 2554 if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2548 NAPI_GRO_CB(p)->same_flow = 0; 2555 NAPI_GRO_CB(p)->same_flow = 0;
2549 continue; 2556 continue;
2550 } 2557 }
@@ -2559,14 +2566,14 @@ found:
2559 flush |= flags & TCP_FLAG_CWR; 2566 flush |= flags & TCP_FLAG_CWR;
2560 flush |= (flags ^ tcp_flag_word(th2)) & 2567 flush |= (flags ^ tcp_flag_word(th2)) &
2561 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2568 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
2562 flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window); 2569 flush |= th->ack_seq ^ th2->ack_seq;
2563 for (i = sizeof(*th); !flush && i < thlen; i += 4) 2570 for (i = sizeof(*th); i < thlen; i += 4)
2564 flush |= *(u32 *)((u8 *)th + i) ^ 2571 flush |= *(u32 *)((u8 *)th + i) ^
2565 *(u32 *)((u8 *)th2 + i); 2572 *(u32 *)((u8 *)th2 + i);
2566 2573
2567 mss = skb_shinfo(p)->gso_size; 2574 mss = skb_shinfo(p)->gso_size;
2568 2575
2569 flush |= (len > mss) | !len; 2576 flush |= (len - 1) >= mss;
2570 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); 2577 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2571 2578
2572 if (flush || skb_gro_receive(head, skb)) { 2579 if (flush || skb_gro_receive(head, skb)) {
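
Note: in tcp_gro_receive() the flush test "(len > mss) | !len" becomes "(len - 1) >= mss". With unsigned arithmetic, len == 0 wraps to UINT_MAX, and any len above mss also lands at or above mss, so a single comparison covers both the empty and the oversized segment. A standalone check of that equivalence over a small range:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Verify that, for unsigned operands, (len - 1) >= mss is equivalent to
 * (len > mss) || (len == 0), which is what the GRO flush test relies on. */
int main(void)
{
        unsigned int len, mss;

        for (mss = 1; mss <= 64; mss++)
                for (len = 0; len <= 2 * mss + 1; len++) {
                        int old_form = (len > mss) || (len == 0);
                        int new_form = (len - 1) >= mss; /* 0 - 1 wraps to UINT_MAX */

                        assert(old_form == new_form);
                }

        printf("equivalence holds for the tested range\n");
        return 0;
}
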
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eec3e6f9956c..2bdb0da237e6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -77,7 +77,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
77int sysctl_tcp_sack __read_mostly = 1; 77int sysctl_tcp_sack __read_mostly = 1;
78int sysctl_tcp_fack __read_mostly = 1; 78int sysctl_tcp_fack __read_mostly = 1;
79int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 79int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
80int sysctl_tcp_ecn __read_mostly; 80int sysctl_tcp_ecn __read_mostly = 2;
81int sysctl_tcp_dsack __read_mostly = 1; 81int sysctl_tcp_dsack __read_mostly = 1;
82int sysctl_tcp_app_win __read_mostly = 31; 82int sysctl_tcp_app_win __read_mostly = 31;
83int sysctl_tcp_adv_win_scale __read_mostly = 2; 83int sysctl_tcp_adv_win_scale __read_mostly = 2;
@@ -4426,7 +4426,7 @@ drop:
4426 } 4426 }
4427 __skb_queue_head(&tp->out_of_order_queue, skb); 4427 __skb_queue_head(&tp->out_of_order_queue, skb);
4428 } else { 4428 } else {
4429 struct sk_buff *skb1 = tp->out_of_order_queue.prev; 4429 struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4430 u32 seq = TCP_SKB_CB(skb)->seq; 4430 u32 seq = TCP_SKB_CB(skb)->seq;
4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4432 4432
@@ -4443,15 +4443,18 @@ drop:
4443 } 4443 }
4444 4444
4445 /* Find place to insert this segment. */ 4445 /* Find place to insert this segment. */
4446 do { 4446 while (1) {
4447 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4447 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4448 break; 4448 break;
4449 } while ((skb1 = skb1->prev) != 4449 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4450 (struct sk_buff *)&tp->out_of_order_queue); 4450 skb1 = NULL;
4451 break;
4452 }
4453 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4454 }
4451 4455
4452 /* Do skb overlap to previous one? */ 4456 /* Do skb overlap to previous one? */
4453 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && 4457 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4454 before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4455 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4458 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4456 /* All the bits are present. Drop. */ 4459 /* All the bits are present. Drop. */
4457 __kfree_skb(skb); 4460 __kfree_skb(skb);
@@ -4463,15 +4466,26 @@ drop:
4463 tcp_dsack_set(sk, seq, 4466 tcp_dsack_set(sk, seq,
4464 TCP_SKB_CB(skb1)->end_seq); 4467 TCP_SKB_CB(skb1)->end_seq);
4465 } else { 4468 } else {
4466 skb1 = skb1->prev; 4469 if (skb_queue_is_first(&tp->out_of_order_queue,
4470 skb1))
4471 skb1 = NULL;
4472 else
4473 skb1 = skb_queue_prev(
4474 &tp->out_of_order_queue,
4475 skb1);
4467 } 4476 }
4468 } 4477 }
4469 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4478 if (!skb1)
4479 __skb_queue_head(&tp->out_of_order_queue, skb);
4480 else
4481 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4470 4482
4471 /* And clean segments covered by new one as whole. */ 4483 /* And clean segments covered by new one as whole. */
4472 while ((skb1 = skb->next) != 4484 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4473 (struct sk_buff *)&tp->out_of_order_queue && 4485 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4474 after(end_seq, TCP_SKB_CB(skb1)->seq)) { 4486
4487 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4488 break;
4475 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4489 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4476 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4490 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4477 end_seq); 4491 end_seq);
@@ -4492,7 +4506,10 @@ add_sack:
4492static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4506static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4493 struct sk_buff_head *list) 4507 struct sk_buff_head *list)
4494{ 4508{
4495 struct sk_buff *next = skb->next; 4509 struct sk_buff *next = NULL;
4510
4511 if (!skb_queue_is_last(list, skb))
4512 next = skb_queue_next(list, skb);
4496 4513
4497 __skb_unlink(skb, list); 4514 __skb_unlink(skb, list);
4498 __kfree_skb(skb); 4515 __kfree_skb(skb);
@@ -4503,6 +4520,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4503 4520
4504/* Collapse contiguous sequence of skbs head..tail with 4521/* Collapse contiguous sequence of skbs head..tail with
4505 * sequence numbers start..end. 4522 * sequence numbers start..end.
4523 *
4524 * If tail is NULL, this means until the end of the list.
4525 *
4506 * Segments with FIN/SYN are not collapsed (only because this 4526 * Segments with FIN/SYN are not collapsed (only because this
4507 * simplifies code) 4527 * simplifies code)
4508 */ 4528 */
@@ -4511,15 +4531,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4511 struct sk_buff *head, struct sk_buff *tail, 4531 struct sk_buff *head, struct sk_buff *tail,
4512 u32 start, u32 end) 4532 u32 start, u32 end)
4513{ 4533{
4514 struct sk_buff *skb; 4534 struct sk_buff *skb, *n;
4535 bool end_of_skbs;
4515 4536
4516 /* First, check that queue is collapsible and find 4537 /* First, check that queue is collapsible and find
4517 * the point where collapsing can be useful. */ 4538 * the point where collapsing can be useful. */
4518 for (skb = head; skb != tail;) { 4539 skb = head;
4540restart:
4541 end_of_skbs = true;
4542 skb_queue_walk_from_safe(list, skb, n) {
4543 if (skb == tail)
4544 break;
4519 /* No new bits? It is possible on ofo queue. */ 4545 /* No new bits? It is possible on ofo queue. */
4520 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4546 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4521 skb = tcp_collapse_one(sk, skb, list); 4547 skb = tcp_collapse_one(sk, skb, list);
4522 continue; 4548 if (!skb)
4549 break;
4550 goto restart;
4523 } 4551 }
4524 4552
4525 /* The first skb to collapse is: 4553 /* The first skb to collapse is:
@@ -4529,16 +4557,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4529 */ 4557 */
4530 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4558 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
4531 (tcp_win_from_space(skb->truesize) > skb->len || 4559 (tcp_win_from_space(skb->truesize) > skb->len ||
4532 before(TCP_SKB_CB(skb)->seq, start) || 4560 before(TCP_SKB_CB(skb)->seq, start))) {
4533 (skb->next != tail && 4561 end_of_skbs = false;
4534 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
4535 break; 4562 break;
4563 }
4564
4565 if (!skb_queue_is_last(list, skb)) {
4566 struct sk_buff *next = skb_queue_next(list, skb);
4567 if (next != tail &&
4568 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
4569 end_of_skbs = false;
4570 break;
4571 }
4572 }
4536 4573
4537 /* Decided to skip this, advance start seq. */ 4574 /* Decided to skip this, advance start seq. */
4538 start = TCP_SKB_CB(skb)->end_seq; 4575 start = TCP_SKB_CB(skb)->end_seq;
4539 skb = skb->next;
4540 } 4576 }
4541 if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4577 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
4542 return; 4578 return;
4543 4579
4544 while (before(start, end)) { 4580 while (before(start, end)) {
@@ -4583,7 +4619,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4583 } 4619 }
4584 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4620 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4585 skb = tcp_collapse_one(sk, skb, list); 4621 skb = tcp_collapse_one(sk, skb, list);
4586 if (skb == tail || 4622 if (!skb ||
4623 skb == tail ||
4587 tcp_hdr(skb)->syn || 4624 tcp_hdr(skb)->syn ||
4588 tcp_hdr(skb)->fin) 4625 tcp_hdr(skb)->fin)
4589 return; 4626 return;
@@ -4610,17 +4647,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4610 head = skb; 4647 head = skb;
4611 4648
4612 for (;;) { 4649 for (;;) {
4613 skb = skb->next; 4650 struct sk_buff *next = NULL;
4651
4652 if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
4653 next = skb_queue_next(&tp->out_of_order_queue, skb);
4654 skb = next;
4614 4655
4615 /* Segment is terminated when we see a gap or when 4656 /* Segment is terminated when we see a gap or when
4616 * we are at the end of the queue. */ 4657 * we are at the end of the queue. */
4617 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 4658 if (!skb ||
4618 after(TCP_SKB_CB(skb)->seq, end) || 4659 after(TCP_SKB_CB(skb)->seq, end) ||
4619 before(TCP_SKB_CB(skb)->end_seq, start)) { 4660 before(TCP_SKB_CB(skb)->end_seq, start)) {
4620 tcp_collapse(sk, &tp->out_of_order_queue, 4661 tcp_collapse(sk, &tp->out_of_order_queue,
4621 head, skb, start, end); 4662 head, skb, start, end);
4622 head = skb; 4663 head = skb;
4623 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 4664 if (!skb)
4624 break; 4665 break;
4625 /* Start new segment */ 4666 /* Start new segment */
4626 start = TCP_SKB_CB(skb)->seq; 4667 start = TCP_SKB_CB(skb)->seq;
@@ -4681,10 +4722,11 @@ static int tcp_prune_queue(struct sock *sk)
4681 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4722 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4682 4723
4683 tcp_collapse_ofo_queue(sk); 4724 tcp_collapse_ofo_queue(sk);
4684 tcp_collapse(sk, &sk->sk_receive_queue, 4725 if (!skb_queue_empty(&sk->sk_receive_queue))
4685 sk->sk_receive_queue.next, 4726 tcp_collapse(sk, &sk->sk_receive_queue,
4686 (struct sk_buff *)&sk->sk_receive_queue, 4727 skb_peek(&sk->sk_receive_queue),
4687 tp->copied_seq, tp->rcv_nxt); 4728 NULL,
4729 tp->copied_seq, tp->rcv_nxt);
4688 sk_mem_reclaim(sk); 4730 sk_mem_reclaim(sk);
4689 4731
4690 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4732 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
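The tcp_input.c hunks above replace raw skb->prev/skb->next walks that compared against the queue-head sentinel with the skb_queue_* helpers, using a NULL skb to mean "front of / past the end of the queue" (and a NULL tail in tcp_collapse() to mean "until the end of the list"). A minimal sketch of that traversal convention, assuming only <linux/skbuff.h>; the helper name is made up for illustration and is not part of the patch:

#include <linux/skbuff.h>

/* Return the skb queued before @skb in @q, or NULL when @skb is already
 * the first entry: the convention the reworked out-of-order code uses
 * instead of comparing pointers against the queue head. */
static struct sk_buff *ofo_prev_or_null(struct sk_buff_head *q,
					struct sk_buff *skb)
{
	if (skb_queue_is_first(q, skb))
		return NULL;
	return skb_queue_prev(q, skb);
}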
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5d427f86b414..fc79e3416288 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1593,7 +1593,7 @@ process:
1593#endif 1593#endif
1594 { 1594 {
1595 if (!tcp_prequeue(sk, skb)) 1595 if (!tcp_prequeue(sk, skb))
1596 ret = tcp_v4_do_rcv(sk, skb); 1596 ret = tcp_v4_do_rcv(sk, skb);
1597 } 1597 }
1598 } else 1598 } else
1599 sk_add_backlog(sk, skb); 1599 sk_add_backlog(sk, skb);
@@ -2343,7 +2343,7 @@ void tcp4_proc_exit(void)
2343 2343
2344struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2344struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2345{ 2345{
2346 struct iphdr *iph = ip_hdr(skb); 2346 struct iphdr *iph = skb_gro_network_header(skb);
2347 2347
2348 switch (skb->ip_summed) { 2348 switch (skb->ip_summed) {
2349 case CHECKSUM_COMPLETE: 2349 case CHECKSUM_COMPLETE:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 59aec609cec6..79c39dc9b01c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -288,7 +288,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
288 struct tcp_sock *tp = tcp_sk(sk); 288 struct tcp_sock *tp = tcp_sk(sk);
289 289
290 tp->ecn_flags = 0; 290 tp->ecn_flags = 0;
291 if (sysctl_tcp_ecn) { 291 if (sysctl_tcp_ecn == 1) {
292 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; 292 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
293 tp->ecn_flags = TCP_ECN_OK; 293 tp->ecn_flags = TCP_ECN_OK;
294 } 294 }
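Taken together, the two TCP hunks above change the meaning of sysctl_tcp_ecn: with the new default of 2, TCP_ECN_send_syn() no longer advertises ECN on outgoing connections (it only fires for the value 1), while incoming connections that request ECN can still negotiate it. A rough sketch of the resulting policy; the helper names are hypothetical and not kernel API:

/* sysctl_tcp_ecn: 0 = ECN off, 1 = request ECN on active opens,
 * 2 = only negotiate ECN when the remote end requests it (new default). */
static inline int ecn_advertise_on_syn(int ecn_sysctl)
{
	return ecn_sysctl == 1;
}

static inline int ecn_accept_when_requested(int ecn_sysctl)
{
	return ecn_sysctl != 0;
}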
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a8218bc1806a..c3488372f12d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -503,7 +503,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
503 return 0; 503 return 0;
504 504
505 if (!rtnl_trylock()) 505 if (!rtnl_trylock())
506 return -ERESTARTSYS; 506 return restart_syscall();
507 507
508 if (p == &net->ipv6.devconf_all->forwarding) { 508 if (p == &net->ipv6.devconf_all->forwarding) {
509 __s32 newf = net->ipv6.devconf_all->forwarding; 509 __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -591,7 +591,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
591{ 591{
592 struct inet6_ifaddr *ifa = NULL; 592 struct inet6_ifaddr *ifa = NULL;
593 struct rt6_info *rt; 593 struct rt6_info *rt;
594 struct net *net = dev_net(idev->dev);
595 int hash; 594 int hash;
596 int err = 0; 595 int err = 0;
597 int addr_type = ipv6_addr_type(addr); 596 int addr_type = ipv6_addr_type(addr);
@@ -608,7 +607,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
608 goto out2; 607 goto out2;
609 } 608 }
610 609
611 if (idev->cnf.disable_ipv6 || net->ipv6.devconf_all->disable_ipv6) { 610 if (idev->cnf.disable_ipv6) {
612 err = -EACCES; 611 err = -EACCES;
613 goto out2; 612 goto out2;
614 } 613 }
@@ -1520,6 +1519,8 @@ static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
1520 1519
1521int __ipv6_isatap_ifid(u8 *eui, __be32 addr) 1520int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
1522{ 1521{
1522 if (addr == 0)
1523 return -1;
1523 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || 1524 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
1524 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || 1525 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
1525 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || 1526 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
@@ -1750,6 +1751,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1750 __u32 prefered_lft; 1751 __u32 prefered_lft;
1751 int addr_type; 1752 int addr_type;
1752 struct inet6_dev *in6_dev; 1753 struct inet6_dev *in6_dev;
1754 struct net *net = dev_net(dev);
1753 1755
1754 pinfo = (struct prefix_info *) opt; 1756 pinfo = (struct prefix_info *) opt;
1755 1757
@@ -1807,7 +1809,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1807 if (addrconf_finite_timeout(rt_expires)) 1809 if (addrconf_finite_timeout(rt_expires))
1808 rt_expires *= HZ; 1810 rt_expires *= HZ;
1809 1811
1810 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1812 rt = rt6_lookup(net, &pinfo->prefix, NULL,
1811 dev->ifindex, 1); 1813 dev->ifindex, 1);
1812 1814
1813 if (rt && addrconf_is_prefix_route(rt)) { 1815 if (rt && addrconf_is_prefix_route(rt)) {
@@ -1844,7 +1846,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1844 struct inet6_ifaddr * ifp; 1846 struct inet6_ifaddr * ifp;
1845 struct in6_addr addr; 1847 struct in6_addr addr;
1846 int create = 0, update_lft = 0; 1848 int create = 0, update_lft = 0;
1847 struct net *net = dev_net(dev);
1848 1849
1849 if (pinfo->prefix_len == 64) { 1850 if (pinfo->prefix_len == 64) {
1850 memcpy(&addr, &pinfo->prefix, 8); 1851 memcpy(&addr, &pinfo->prefix, 8);
@@ -3986,6 +3987,75 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
3986 return addrconf_fixup_forwarding(table, valp, val); 3987 return addrconf_fixup_forwarding(table, valp, val);
3987} 3988}
3988 3989
3990static void dev_disable_change(struct inet6_dev *idev)
3991{
3992 if (!idev || !idev->dev)
3993 return;
3994
3995 if (idev->cnf.disable_ipv6)
3996 addrconf_notify(NULL, NETDEV_DOWN, idev->dev);
3997 else
3998 addrconf_notify(NULL, NETDEV_UP, idev->dev);
3999}
4000
4001static void addrconf_disable_change(struct net *net, __s32 newf)
4002{
4003 struct net_device *dev;
4004 struct inet6_dev *idev;
4005
4006 read_lock(&dev_base_lock);
4007 for_each_netdev(net, dev) {
4008 rcu_read_lock();
4009 idev = __in6_dev_get(dev);
4010 if (idev) {
4011 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
4012 idev->cnf.disable_ipv6 = newf;
4013 if (changed)
4014 dev_disable_change(idev);
4015 }
4016 rcu_read_unlock();
4017 }
4018 read_unlock(&dev_base_lock);
4019}
4020
4021static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
4022{
4023 struct net *net;
4024
4025 net = (struct net *)table->extra2;
4026
4027 if (p == &net->ipv6.devconf_dflt->disable_ipv6)
4028 return 0;
4029
4030 if (!rtnl_trylock())
4031 return restart_syscall();
4032
4033 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4034 __s32 newf = net->ipv6.devconf_all->disable_ipv6;
4035 net->ipv6.devconf_dflt->disable_ipv6 = newf;
4036 addrconf_disable_change(net, newf);
4037 } else if ((!*p) ^ (!old))
4038 dev_disable_change((struct inet6_dev *)table->extra1);
4039
4040 rtnl_unlock();
4041 return 0;
4042}
4043
4044static
4045int addrconf_sysctl_disable(ctl_table *ctl, int write, struct file * filp,
4046 void __user *buffer, size_t *lenp, loff_t *ppos)
4047{
4048 int *valp = ctl->data;
4049 int val = *valp;
4050 int ret;
4051
4052 ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
4053
4054 if (write)
4055 ret = addrconf_disable_ipv6(ctl, valp, val);
4056 return ret;
4057}
4058
3989static struct addrconf_sysctl_table 4059static struct addrconf_sysctl_table
3990{ 4060{
3991 struct ctl_table_header *sysctl_header; 4061 struct ctl_table_header *sysctl_header;
@@ -4223,7 +4293,8 @@ static struct addrconf_sysctl_table
4223 .data = &ipv6_devconf.disable_ipv6, 4293 .data = &ipv6_devconf.disable_ipv6,
4224 .maxlen = sizeof(int), 4294 .maxlen = sizeof(int),
4225 .mode = 0644, 4295 .mode = 0644,
4226 .proc_handler = proc_dointvec, 4296 .proc_handler = addrconf_sysctl_disable,
4297 .strategy = sysctl_intvec,
4227 }, 4298 },
4228 { 4299 {
4229 .ctl_name = CTL_UNNUMBERED, 4300 .ctl_name = CTL_UNNUMBERED,
@@ -4344,6 +4415,10 @@ static int addrconf_init_net(struct net *net)
4344 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 4415 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
4345 if (dflt == NULL) 4416 if (dflt == NULL)
4346 goto err_alloc_dflt; 4417 goto err_alloc_dflt;
4418 } else {
4419 /* these will be inherited by all namespaces */
4420 dflt->autoconf = ipv6_defaults.autoconf;
4421 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4347 } 4422 }
4348 4423
4349 net->ipv6.devconf_all = all; 4424 net->ipv6.devconf_all = all;
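The addrconf hunks above hook disable_ipv6 up to a real handler: proc_dointvec() stores the new value, addrconf_disable_ipv6() then propagates a write to "all" into the default and per-device configs, and dev_disable_change() replays NETDEV_DOWN/NETDEV_UP so addresses are torn down or re-added. Like the forwarding handler, it bounces off rtnl with restart_syscall() rather than sleeping, avoiding a deadlock between the rtnl mutex and the sysctl infrastructure. A stripped-down sketch of that handler shape (kernel context assumed, names illustrative):

#include <linux/sysctl.h>
#include <linux/rtnetlink.h>

static void example_apply_flag_change(void *priv, int newf)
{
	/* hypothetical: push the new flag value out to the affected devices */
}

static int example_flag_sysctl(ctl_table *ctl, int write, struct file *filp,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int old = *valp;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);

	if (!write || ret)
		return ret;
	if (!rtnl_trylock())
		return restart_syscall();	/* retry the syscall instead of blocking on rtnl */
	if ((!*valp) ^ (!old))			/* only act when the flag actually flipped */
		example_apply_flag_change(ctl->extra1, *valp);
	rtnl_unlock();
	return ret;
}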
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 61f55386a236..85b3d0036afd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,9 +72,21 @@ MODULE_LICENSE("GPL");
72static struct list_head inetsw6[SOCK_MAX]; 72static struct list_head inetsw6[SOCK_MAX];
73static DEFINE_SPINLOCK(inetsw6_lock); 73static DEFINE_SPINLOCK(inetsw6_lock);
74 74
75static int disable_ipv6 = 0; 75struct ipv6_params ipv6_defaults = {
76module_param_named(disable, disable_ipv6, int, 0); 76 .disable_ipv6 = 0,
77MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional"); 77 .autoconf = 1,
78};
79
80static int disable_ipv6_mod = 0;
81
82module_param_named(disable, disable_ipv6_mod, int, 0444);
83MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
84
85module_param_named(disable_ipv6, ipv6_defaults.disable_ipv6, int, 0444);
86MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces");
87
88module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
89MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
78 90
79static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 91static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
80{ 92{
@@ -817,13 +829,20 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
817 struct sk_buff *p; 829 struct sk_buff *p;
818 struct ipv6hdr *iph; 830 struct ipv6hdr *iph;
819 unsigned int nlen; 831 unsigned int nlen;
832 unsigned int hlen;
833 unsigned int off;
820 int flush = 1; 834 int flush = 1;
821 int proto; 835 int proto;
822 __wsum csum; 836 __wsum csum;
823 837
824 iph = skb_gro_header(skb, sizeof(*iph)); 838 off = skb_gro_offset(skb);
825 if (unlikely(!iph)) 839 hlen = off + sizeof(*iph);
826 goto out; 840 iph = skb_gro_header_fast(skb, off);
841 if (skb_gro_header_hard(skb, hlen)) {
842 iph = skb_gro_header_slow(skb, hlen, off);
843 if (unlikely(!iph))
844 goto out;
845 }
827 846
828 skb_gro_pull(skb, sizeof(*iph)); 847 skb_gro_pull(skb, sizeof(*iph));
829 skb_set_transport_header(skb, skb_gro_offset(skb)); 848 skb_set_transport_header(skb, skb_gro_offset(skb));
@@ -1031,7 +1050,7 @@ static int __init inet6_init(void)
1031 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) 1050 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
1032 INIT_LIST_HEAD(r); 1051 INIT_LIST_HEAD(r);
1033 1052
1034 if (disable_ipv6) { 1053 if (disable_ipv6_mod) {
1035 printk(KERN_INFO 1054 printk(KERN_INFO
1036 "IPv6: Loaded, but administratively disabled, " 1055 "IPv6: Loaded, but administratively disabled, "
1037 "reboot required to enable\n"); 1056 "reboot required to enable\n");
@@ -1220,7 +1239,7 @@ module_init(inet6_init);
1220 1239
1221static void __exit inet6_exit(void) 1240static void __exit inet6_exit(void)
1222{ 1241{
1223 if (disable_ipv6) 1242 if (disable_ipv6_mod)
1224 return; 1243 return;
1225 1244
1226 /* First of all disallow new sockets creation. */ 1245 /* First of all disallow new sockets creation. */
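The ipv6_gro_receive() hunk above switches to the two-stage GRO header accessors: skb_gro_header_fast() is used when the header already sits in the data pulled for GRO, and skb_gro_header_slow() is only taken when skb_gro_header_hard() reports that more data is needed. The same pattern pulled out into a stand-alone sketch (assuming <linux/netdevice.h> of this kernel series; not part of the patch):

#include <linux/netdevice.h>
#include <linux/ipv6.h>

static struct ipv6hdr *gro_ipv6_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct ipv6hdr);
	struct ipv6hdr *iph = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, hlen))
		iph = skb_gro_header_slow(skb, hlen, off);	/* may return NULL */
	return iph;
}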
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index f5de3f9dc692..00a7a5e4ac97 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -151,7 +151,7 @@ static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = {
151}; 151};
152 152
153static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, 153static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
154 struct nlmsghdr *nlh, struct fib_rule_hdr *frh, 154 struct fib_rule_hdr *frh,
155 struct nlattr **tb) 155 struct nlattr **tb)
156{ 156{
157 int err = -EINVAL; 157 int err = -EINVAL;
@@ -211,7 +211,7 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
211} 211}
212 212
213static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, 213static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
214 struct nlmsghdr *nlh, struct fib_rule_hdr *frh) 214 struct fib_rule_hdr *frh)
215{ 215{
216 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 216 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
217 217
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 8f04bd9da274..bc1a920c34a1 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -70,7 +70,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
70 70
71 idev = __in6_dev_get(skb->dev); 71 idev = __in6_dev_get(skb->dev);
72 72
73 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INRECEIVES); 73 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
74 74
75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || 75 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
76 !idev || unlikely(idev->cnf.disable_ipv6)) { 76 !idev || unlikely(idev->cnf.disable_ipv6)) {
@@ -242,8 +242,9 @@ int ip6_mc_input(struct sk_buff *skb)
242 struct ipv6hdr *hdr; 242 struct ipv6hdr *hdr;
243 int deliver; 243 int deliver;
244 244
245 IP6_INC_STATS_BH(dev_net(skb->dst->dev), 245 IP6_UPD_PO_STATS_BH(dev_net(skb->dst->dev),
246 ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); 246 ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCAST,
247 skb->len);
247 248
248 hdr = ipv6_hdr(skb); 249 hdr = ipv6_hdr(skb);
249 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); 250 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9fb49c3b518a..735a2bf4b5f1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -159,7 +159,8 @@ static int ip6_output2(struct sk_buff *skb)
159 } 159 }
160 } 160 }
161 161
162 IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCASTPKTS); 162 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
163 skb->len);
163 } 164 }
164 165
165 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev, 166 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
@@ -275,8 +276,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
275 276
276 mtu = dst_mtu(dst); 277 mtu = dst_mtu(dst);
277 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { 278 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
278 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), 279 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb->dst),
279 IPSTATS_MIB_OUTREQUESTS); 280 IPSTATS_MIB_OUT, skb->len);
280 return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 281 return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
281 dst_output); 282 dst_output);
282 } 283 }
@@ -1516,7 +1517,7 @@ int ip6_push_pending_frames(struct sock *sk)
1516 skb->mark = sk->sk_mark; 1517 skb->mark = sk->sk_mark;
1517 1518
1518 skb->dst = dst_clone(&rt->u.dst); 1519 skb->dst = dst_clone(&rt->u.dst);
1519 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); 1520 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1520 if (proto == IPPROTO_ICMPV6) { 1521 if (proto == IPPROTO_ICMPV6) {
1521 struct inet6_dev *idev = ip6_dst_idev(skb->dst); 1522 struct inet6_dev *idev = ip6_dst_idev(skb->dst);
1522 1523
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d994c55a5b16..af256d47fd35 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1100,8 +1100,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1100 struct ip6_tnl_parm *p = &t->parms; 1100 struct ip6_tnl_parm *p = &t->parms;
1101 struct flowi *fl = &t->fl; 1101 struct flowi *fl = &t->fl;
1102 1102
1103 memcpy(&dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 1103 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1104 memcpy(&dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 1104 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1105 1105
1106 /* Set up flowi template */ 1106 /* Set up flowi template */
1107 ipv6_addr_copy(&fl->fl6_src, &p->laddr); 1107 ipv6_addr_copy(&fl->fl6_src, &p->laddr);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a51fb33e6864..4b48819a5b8d 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1449,7 +1449,8 @@ static void mld_sendpack(struct sk_buff *skb)
1449 int err; 1449 int err;
1450 struct flowi fl; 1450 struct flowi fl;
1451 1451
1452 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS); 1452 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1453
1453 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1454 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
1454 mldlen = skb->tail - skb->transport_header; 1455 mldlen = skb->tail - skb->transport_header;
1455 pip6->payload_len = htons(payload_len); 1456 pip6->payload_len = htons(payload_len);
@@ -1473,13 +1474,15 @@ static void mld_sendpack(struct sk_buff *skb)
1473 if (err) 1474 if (err)
1474 goto err_out; 1475 goto err_out;
1475 1476
1477 payload_len = skb->len;
1478
1476 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1479 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1477 dst_output); 1480 dst_output);
1478out: 1481out:
1479 if (!err) { 1482 if (!err) {
1480 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT); 1483 ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
1481 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); 1484 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1482 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTMCASTPKTS); 1485 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1483 } else 1486 } else
1484 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); 1487 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1485 1488
@@ -1773,10 +1776,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1773 IPV6_TLV_PADN, 0 }; 1776 IPV6_TLV_PADN, 0 };
1774 struct flowi fl; 1777 struct flowi fl;
1775 1778
1776 rcu_read_lock();
1777 IP6_INC_STATS(net, __in6_dev_get(dev),
1778 IPSTATS_MIB_OUTREQUESTS);
1779 rcu_read_unlock();
1780 if (type == ICMPV6_MGM_REDUCTION) 1779 if (type == ICMPV6_MGM_REDUCTION)
1781 snd_addr = &in6addr_linklocal_allrouters; 1780 snd_addr = &in6addr_linklocal_allrouters;
1782 else 1781 else
@@ -1786,6 +1785,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1786 payload_len = len + sizeof(ra); 1785 payload_len = len + sizeof(ra);
1787 full_len = sizeof(struct ipv6hdr) + payload_len; 1786 full_len = sizeof(struct ipv6hdr) + payload_len;
1788 1787
1788 rcu_read_lock();
1789 IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
1790 IPSTATS_MIB_OUT, full_len);
1791 rcu_read_unlock();
1792
1789 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); 1793 skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
1790 1794
1791 if (skb == NULL) { 1795 if (skb == NULL) {
@@ -1838,13 +1842,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1838 if (err) 1842 if (err)
1839 goto err_out; 1843 goto err_out;
1840 1844
1845
1841 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1846 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1842 dst_output); 1847 dst_output);
1843out: 1848out:
1844 if (!err) { 1849 if (!err) {
1845 ICMP6MSGOUT_INC_STATS(net, idev, type); 1850 ICMP6MSGOUT_INC_STATS(net, idev, type);
1846 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 1851 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1847 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTMCASTPKTS); 1852 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1848 } else 1853 } else
1849 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 1854 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1850 1855
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9f061d1adbc2..1d13d9964985 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -465,8 +465,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
465 1, &err); 465 1, &err);
466 if (!skb) { 466 if (!skb) {
467 ND_PRINTK0(KERN_ERR 467 ND_PRINTK0(KERN_ERR
468 "ICMPv6 ND: %s() failed to allocate an skb.\n", 468 "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
469 __func__); 469 __func__, err);
470 return NULL; 470 return NULL;
471 } 471 }
472 472
@@ -533,7 +533,7 @@ void ndisc_send_skb(struct sk_buff *skb,
533 skb->dst = dst; 533 skb->dst = dst;
534 534
535 idev = in6_dev_get(dst->dev); 535 idev = in6_dev_get(dst->dev);
536 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS); 536 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
537 537
538 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 538 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
539 dst_output); 539 dst_output);
@@ -658,6 +658,7 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
658 &icmp6h, NULL, 658 &icmp6h, NULL,
659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0); 659 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0);
660} 660}
661EXPORT_SYMBOL(ndisc_send_rs);
661 662
662 663
663static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb) 664static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
@@ -1561,8 +1562,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1561 1, &err); 1562 1, &err);
1562 if (buff == NULL) { 1563 if (buff == NULL) {
1563 ND_PRINTK0(KERN_ERR 1564 ND_PRINTK0(KERN_ERR
1564 "ICMPv6 Redirect: %s() failed to allocate an skb.\n", 1565 "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n",
1565 __func__); 1566 __func__, err);
1566 goto release; 1567 goto release;
1567 } 1568 }
1568 1569
@@ -1613,7 +1614,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1613 1614
1614 buff->dst = dst; 1615 buff->dst = dst;
1615 idev = in6_dev_get(dst->dev); 1616 idev = in6_dev_get(dst->dev);
1616 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS); 1617 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1617 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, 1618 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
1618 dst_output); 1619 dst_output);
1619 if (!err) { 1620 if (!err) {
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 97c17fdd6f75..590ddefb7ffc 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -61,7 +61,7 @@ static const struct file_operations sockstat6_seq_fops = {
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
66 SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS), 66 SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS),
67 SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES), 67 SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES),
@@ -71,7 +71,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
71 SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS), 71 SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
72 SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS), 72 SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
73 SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), 73 SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
74 SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS), 74 SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTPKTS),
75 SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS), 75 SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
76 SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), 76 SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
77 SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), 77 SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
@@ -83,6 +83,12 @@ static struct snmp_mib snmp6_ipstats_list[] = {
83 SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES), 83 SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES),
84 SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS), 84 SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
85 SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), 85 SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
86 SNMP_MIB_ITEM("Ip6InOctets", IPSTATS_MIB_INOCTETS),
87 SNMP_MIB_ITEM("Ip6OutOctets", IPSTATS_MIB_OUTOCTETS),
88 SNMP_MIB_ITEM("Ip6InMcastOctets", IPSTATS_MIB_INMCASTOCTETS),
89 SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
90 SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
91 SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
86 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
87}; 93};
88 94
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 61f6827e5906..e99307fba0b1 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -638,7 +638,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
638 if (err) 638 if (err)
639 goto error_fault; 639 goto error_fault;
640 640
641 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); 641 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, 642 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
643 dst_output); 643 dst_output);
644 if (err > 0) 644 if (err > 0)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 664ab82e03b2..b3a59bd40f01 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -15,6 +15,7 @@
15 * Roger Venning <r.venning@telstra.com>: 6to4 support 15 * Roger Venning <r.venning@telstra.com>: 6to4 support
16 * Nate Thompson <nate@thebog.net>: 6to4 support 16 * Nate Thompson <nate@thebog.net>: 6to4 support
17 * Fred Templin <fred.l.templin@boeing.com>: isatap support 17 * Fred Templin <fred.l.templin@boeing.com>: isatap support
18 * Sascha Hlusiak <mail@saschahlusiak.de>: stateless autoconf for isatap
18 */ 19 */
19 20
20#include <linux/module.h> 21#include <linux/module.h>
@@ -80,7 +81,7 @@ struct sit_net {
80static DEFINE_RWLOCK(ipip6_lock); 81static DEFINE_RWLOCK(ipip6_lock);
81 82
82static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 83static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
83 __be32 remote, __be32 local) 84 struct net_device *dev, __be32 remote, __be32 local)
84{ 85{
85 unsigned h0 = HASH(remote); 86 unsigned h0 = HASH(remote);
86 unsigned h1 = HASH(local); 87 unsigned h1 = HASH(local);
@@ -89,18 +90,25 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
89 90
90 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) { 91 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) {
91 if (local == t->parms.iph.saddr && 92 if (local == t->parms.iph.saddr &&
92 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 93 remote == t->parms.iph.daddr &&
94 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
95 (t->dev->flags & IFF_UP))
93 return t; 96 return t;
94 } 97 }
95 for (t = sitn->tunnels_r[h0]; t; t = t->next) { 98 for (t = sitn->tunnels_r[h0]; t; t = t->next) {
96 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 99 if (remote == t->parms.iph.daddr &&
100 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
101 (t->dev->flags & IFF_UP))
97 return t; 102 return t;
98 } 103 }
99 for (t = sitn->tunnels_l[h1]; t; t = t->next) { 104 for (t = sitn->tunnels_l[h1]; t; t = t->next) {
100 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 105 if (local == t->parms.iph.saddr &&
106 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
107 (t->dev->flags & IFF_UP))
101 return t; 108 return t;
102 } 109 }
103 if ((t = sitn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) 110 t = sitn->tunnels_wc[0];
111 if ((t != NULL) && (t->dev->flags & IFF_UP))
104 return t; 112 return t;
105 return NULL; 113 return NULL;
106} 114}
@@ -165,8 +173,14 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
165 struct sit_net *sitn = net_generic(net, sit_net_id); 173 struct sit_net *sitn = net_generic(net, sit_net_id);
166 174
167 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) { 175 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
168 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) 176 if (local == t->parms.iph.saddr &&
169 return t; 177 remote == t->parms.iph.daddr &&
178 parms->link == t->parms.link) {
179 if (create)
180 return NULL;
181 else
182 return t;
183 }
170 } 184 }
171 if (!create) 185 if (!create)
172 goto failed; 186 goto failed;
@@ -209,6 +223,44 @@ failed:
209 return NULL; 223 return NULL;
210} 224}
211 225
226static void ipip6_tunnel_rs_timer(unsigned long data)
227{
228 struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *) data;
229 struct inet6_dev *ifp;
230 struct inet6_ifaddr *addr;
231
232 spin_lock(&p->lock);
233 ifp = __in6_dev_get(p->tunnel->dev);
234
235 read_lock_bh(&ifp->lock);
236 for (addr = ifp->addr_list; addr; addr = addr->if_next) {
237 struct in6_addr rtr;
238
239 if (!(ipv6_addr_type(&addr->addr) & IPV6_ADDR_LINKLOCAL))
240 continue;
241
242 /* Send RS to guessed linklocal address of router
243 *
244 * Better: send to ff02::2 encapsulated in unicast directly
245 * to router-v4 instead of guessing the v6 address.
246 *
247 * Cisco/Windows seem to not set the u/l bit correctly,
248 * so we won't guess right.
249 */
250 ipv6_addr_set(&rtr, htonl(0xFE800000), 0, 0, 0);
251 if (!__ipv6_isatap_ifid(rtr.s6_addr + 8,
252 p->addr)) {
253 ndisc_send_rs(p->tunnel->dev, &addr->addr, &rtr);
254 }
255 }
256 read_unlock_bh(&ifp->lock);
257
258 mod_timer(&p->rs_timer, jiffies + HZ * p->rs_delay);
259 spin_unlock(&p->lock);
260
261 return;
262}
263
212static struct ip_tunnel_prl_entry * 264static struct ip_tunnel_prl_entry *
213__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) 265__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
214{ 266{
@@ -267,6 +319,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
267 continue; 319 continue;
268 kp[c].addr = prl->addr; 320 kp[c].addr = prl->addr;
269 kp[c].flags = prl->flags; 321 kp[c].flags = prl->flags;
322 kp[c].rs_delay = prl->rs_delay;
270 c++; 323 c++;
271 if (kprl.addr != htonl(INADDR_ANY)) 324 if (kprl.addr != htonl(INADDR_ANY))
272 break; 325 break;
@@ -316,11 +369,23 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
316 } 369 }
317 370
318 p->next = t->prl; 371 p->next = t->prl;
372 p->tunnel = t;
319 t->prl = p; 373 t->prl = p;
320 t->prl_count++; 374 t->prl_count++;
375
376 spin_lock_init(&p->lock);
377 setup_timer(&p->rs_timer, ipip6_tunnel_rs_timer, (unsigned long) p);
321update: 378update:
322 p->addr = a->addr; 379 p->addr = a->addr;
323 p->flags = a->flags; 380 p->flags = a->flags;
381 p->rs_delay = a->rs_delay;
382 if (p->rs_delay == 0)
383 p->rs_delay = IPTUNNEL_RS_DEFAULT_DELAY;
384 spin_lock(&p->lock);
385 del_timer(&p->rs_timer);
386 if (p->flags & PRL_DEFAULT)
387 mod_timer(&p->rs_timer, jiffies + 1);
388 spin_unlock(&p->lock);
324out: 389out:
325 write_unlock(&ipip6_lock); 390 write_unlock(&ipip6_lock);
326 return err; 391 return err;
@@ -339,6 +404,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
339 if ((*p)->addr == a->addr) { 404 if ((*p)->addr == a->addr) {
340 x = *p; 405 x = *p;
341 *p = x->next; 406 *p = x->next;
407 spin_lock(&x->lock);
408 del_timer(&x->rs_timer);
409 spin_unlock(&x->lock);
342 kfree(x); 410 kfree(x);
343 t->prl_count--; 411 t->prl_count--;
344 goto out; 412 goto out;
@@ -349,13 +417,16 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
349 while (t->prl) { 417 while (t->prl) {
350 x = t->prl; 418 x = t->prl;
351 t->prl = t->prl->next; 419 t->prl = t->prl->next;
420 spin_lock(&x->lock);
421 del_timer(&x->rs_timer);
422 spin_unlock(&x->lock);
352 kfree(x); 423 kfree(x);
353 t->prl_count--; 424 t->prl_count--;
354 } 425 }
355 } 426 }
356out: 427out:
357 write_unlock(&ipip6_lock); 428 write_unlock(&ipip6_lock);
358 return 0; 429 return err;
359} 430}
360 431
361static int 432static int
@@ -446,7 +517,10 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
446 err = -ENOENT; 517 err = -ENOENT;
447 518
448 read_lock(&ipip6_lock); 519 read_lock(&ipip6_lock);
449 t = ipip6_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 520 t = ipip6_tunnel_lookup(dev_net(skb->dev),
521 skb->dev,
522 iph->daddr,
523 iph->saddr);
450 if (t == NULL || t->parms.iph.daddr == 0) 524 if (t == NULL || t->parms.iph.daddr == 0)
451 goto out; 525 goto out;
452 526
@@ -481,8 +555,9 @@ static int ipip6_rcv(struct sk_buff *skb)
481 iph = ip_hdr(skb); 555 iph = ip_hdr(skb);
482 556
483 read_lock(&ipip6_lock); 557 read_lock(&ipip6_lock);
484 if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), 558 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
485 iph->saddr, iph->daddr)) != NULL) { 559 iph->saddr, iph->daddr);
560 if (tunnel != NULL) {
486 secpath_reset(skb); 561 secpath_reset(skb);
487 skb->mac_header = skb->network_header; 562 skb->mac_header = skb->network_header;
488 skb_reset_network_header(skb); 563 skb_reset_network_header(skb);
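The sit hunks above make tunnel lookup honour the link binding: a tunnel created with a specific parms.link now only matches packets arriving over that underlying device, and ipip6_tunnel_locate() refuses to create a duplicate for the same (saddr, daddr, link) triple. The match condition spelled out as an illustrative helper (sketch, not part of the patch):

#include <linux/netdevice.h>
#include <net/ipip.h>

static int sit_tunnel_dev_match(const struct ip_tunnel *t,
				const struct net_device *dev)
{
	/* unbound tunnels (parms.link == 0) match any device;
	 * bound tunnels only match their underlying interface */
	return !dev || !t->parms.link || dev->iflink == t->parms.link;
}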
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 711175e0571f..8c2513982b61 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -131,7 +131,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
131 int mssind; 131 int mssind;
132 const __u16 mss = *mssp; 132 const __u16 mss = *mssp;
133 133
134 tcp_sk(sk)->last_synq_overflow = jiffies; 134 tcp_synq_overflow(sk);
135 135
136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
137 ; 137 ;
@@ -175,7 +175,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
175 if (!sysctl_tcp_syncookies || !th->ack) 175 if (!sysctl_tcp_syncookies || !th->ack)
176 goto out; 176 goto out;
177 177
178 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 178 if (tcp_synq_no_recent_overflow(sk) ||
179 (mss = cookie_check(skb, cookie)) == 0) { 179 (mss = cookie_check(skb, cookie)) == 0) {
180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); 180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
181 goto out; 181 goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4b5aa1854260..ea37741062a9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -941,9 +941,10 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
941 return 0; 941 return 0;
942} 942}
943 943
944struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) 944static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
945 struct sk_buff *skb)
945{ 946{
946 struct ipv6hdr *iph = ipv6_hdr(skb); 947 struct ipv6hdr *iph = skb_gro_network_header(skb);
947 948
948 switch (skb->ip_summed) { 949 switch (skb->ip_summed) {
949 case CHECKSUM_COMPLETE: 950 case CHECKSUM_COMPLETE:
@@ -961,9 +962,8 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
961 962
962 return tcp_gro_receive(head, skb); 963 return tcp_gro_receive(head, skb);
963} 964}
964EXPORT_SYMBOL(tcp6_gro_receive);
965 965
966int tcp6_gro_complete(struct sk_buff *skb) 966static int tcp6_gro_complete(struct sk_buff *skb)
967{ 967{
968 struct ipv6hdr *iph = ipv6_hdr(skb); 968 struct ipv6hdr *iph = ipv6_hdr(skb);
969 struct tcphdr *th = tcp_hdr(skb); 969 struct tcphdr *th = tcp_hdr(skb);
@@ -974,7 +974,6 @@ int tcp6_gro_complete(struct sk_buff *skb)
974 974
975 return tcp_gro_complete(skb); 975 return tcp_gro_complete(skb);
976} 976}
977EXPORT_SYMBOL(tcp6_gro_complete);
978 977
979static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 978static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
980 u32 ts, struct tcp_md5sig_key *key, int rst) 979 u32 ts, struct tcp_md5sig_key *key, int rst)
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 2562ebc1b22c..7af2e74deda8 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -982,17 +982,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
982{ 982{
983 struct sk_buff *tx_skb; 983 struct sk_buff *tx_skb;
984 struct sk_buff *skb; 984 struct sk_buff *skb;
985 int count;
986 985
987 IRDA_ASSERT(self != NULL, return;); 986 IRDA_ASSERT(self != NULL, return;);
988 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 987 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
989 988
990 /* Initialize variables */
991 count = skb_queue_len(&self->wx_list);
992
993 /* Resend unacknowledged frame(s) */ 989 /* Resend unacknowledged frame(s) */
994 skb = skb_peek(&self->wx_list); 990 skb_queue_walk(&self->wx_list, skb) {
995 while (skb != NULL) {
996 irlap_wait_min_turn_around(self, &self->qos_tx); 991 irlap_wait_min_turn_around(self, &self->qos_tx);
997 992
998 /* We copy the skb to be retransmitted since we will have to 993 /* We copy the skb to be retransmitted since we will have to
@@ -1011,21 +1006,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
1011 /* 1006 /*
1012 * Set poll bit on the last frame retransmitted 1007 * Set poll bit on the last frame retransmitted
1013 */ 1008 */
1014 if (count-- == 1) 1009 if (skb_queue_is_last(&self->wx_list, skb))
1015 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ 1010 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
1016 else 1011 else
1017 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ 1012 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */
1018 1013
1019 irlap_send_i_frame(self, tx_skb, command); 1014 irlap_send_i_frame(self, tx_skb, command);
1020
1021 /*
1022 * If our skb is the last buffer in the list, then
1023 * we are finished, if not, move to the next sk-buffer
1024 */
1025 if (skb == skb_peek_tail(&self->wx_list))
1026 skb = NULL;
1027 else
1028 skb = skb->next;
1029 } 1015 }
1030#if 0 /* Not yet */ 1016#if 0 /* Not yet */
1031 /* 1017 /*
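irlap_resend_rejected_frames() above drops its manual peek/next walk and the frame counter in favour of skb_queue_walk(), using skb_queue_is_last() to decide which copy gets the P/F bit. The same idiom in isolation, assuming <linux/skbuff.h>; the function and the two-byte header layout are illustrative only:

#include <linux/skbuff.h>

static void mark_last_frame(struct sk_buff_head *q, u8 pf_bit)
{
	struct sk_buff *skb;

	skb_queue_walk(q, skb) {
		/* assumes every queued frame has at least two header bytes */
		if (skb_queue_is_last(q, skb))
			skb->data[1] |= pf_bit;		/* poll/final on the last frame */
		else
			skb->data[1] &= ~pf_bit;
	}
}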
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 2f05ec1037ab..8dd7ed7e7c1f 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -87,7 +87,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
87 if (!dev) 87 if (!dev)
88 return -ENODEV; 88 return -ENODEV;
89 89
90 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 90 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
91 if (!msg) { 91 if (!msg) {
92 dev_put(dev); 92 dev_put(dev);
93 return -ENOMEM; 93 return -ENOMEM;
@@ -148,21 +148,8 @@ static struct genl_ops irda_nl_ops[] = {
148 148
149int irda_nl_register(void) 149int irda_nl_register(void)
150{ 150{
151 int err, i; 151 return genl_register_family_with_ops(&irda_nl_family,
152 152 irda_nl_ops, ARRAY_SIZE(irda_nl_ops));
153 err = genl_register_family(&irda_nl_family);
154 if (err)
155 return err;
156
157 for (i = 0; i < ARRAY_SIZE(irda_nl_ops); i++) {
158 err = genl_register_ops(&irda_nl_family, &irda_nl_ops[i]);
159 if (err)
160 goto err_out;
161 }
162 return 0;
163 err_out:
164 genl_unregister_family(&irda_nl_family);
165 return err;
166} 153}
167 154
168void irda_nl_unregister(void) 155void irda_nl_unregister(void)
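irda_nl_register() above collapses the register-the-family-then-each-op loop into genl_register_family_with_ops(), which also unwinds the family registration if any op fails. A minimal usage sketch with made-up family and op names:

#include <linux/kernel.h>
#include <net/genetlink.h>

static struct genl_family example_genl_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "example",
	.version = 1,
	.maxattr = 0,
};

static struct genl_ops example_genl_ops[] = {
	/* { .cmd = EXAMPLE_CMD_GET, .doit = example_get, }, ... */
};

static int example_genl_register(void)
{
	/* registers the family and every op, rolling back on failure */
	return genl_register_family_with_ops(&example_genl_family,
					     example_genl_ops,
					     ARRAY_SIZE(example_genl_ops));
}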
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index b51c9187c347..a9b3a6f9ea95 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -29,10 +29,7 @@
29#include <net/iucv/iucv.h> 29#include <net/iucv/iucv.h>
30#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
31 31
32#define CONFIG_IUCV_SOCK_DEBUG 1 32#define VERSION "1.1"
33
34#define IPRMDATA 0x80
35#define VERSION "1.0"
36 33
37static char iucv_userid[80]; 34static char iucv_userid[80];
38 35
@@ -44,6 +41,19 @@ static struct proto iucv_proto = {
44 .obj_size = sizeof(struct iucv_sock), 41 .obj_size = sizeof(struct iucv_sock),
45}; 42};
46 43
44/* special AF_IUCV IPRM messages */
45static const u8 iprm_shutdown[8] =
46 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
47
48#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
49
50/* macros to set/get socket control buffer at correct offset */
51#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
52#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
53#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
54#define CB_TRGCLS_LEN (TRGCLS_SIZE)
55
56
47static void iucv_sock_kill(struct sock *sk); 57static void iucv_sock_kill(struct sock *sk);
48static void iucv_sock_close(struct sock *sk); 58static void iucv_sock_close(struct sock *sk);
49 59
@@ -54,6 +64,7 @@ static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
54static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], 64static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
55 u8 ipuser[16]); 65 u8 ipuser[16]);
56static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 66static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
67static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
57 68
58static struct iucv_sock_list iucv_sk_list = { 69static struct iucv_sock_list iucv_sk_list = {
59 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), 70 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
@@ -65,7 +76,8 @@ static struct iucv_handler af_iucv_handler = {
65 .path_complete = iucv_callback_connack, 76 .path_complete = iucv_callback_connack,
66 .path_severed = iucv_callback_connrej, 77 .path_severed = iucv_callback_connrej,
67 .message_pending = iucv_callback_rx, 78 .message_pending = iucv_callback_rx,
68 .message_complete = iucv_callback_txdone 79 .message_complete = iucv_callback_txdone,
80 .path_quiesced = iucv_callback_shutdown,
69}; 81};
70 82
71static inline void high_nmcpy(unsigned char *dst, char *src) 83static inline void high_nmcpy(unsigned char *dst, char *src)
@@ -78,6 +90,37 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
78 memcpy(&dst[8], src, 8); 90 memcpy(&dst[8], src, 8);
79} 91}
80 92
93/**
94 * iucv_msg_length() - Returns the length of an iucv message.
95 * @msg: Pointer to struct iucv_message, MUST NOT be NULL
96 *
97 * The function returns the length of the specified iucv message @msg of data
98 * stored in a buffer and of data stored in the parameter list (PRMDATA).
99 *
100 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
101 * data:
102 * PRMDATA[0..6] socket data (max 7 bytes);
103 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
104 *
 105 * The socket data length is computed by subtracting the socket data length
106 * value from 0xFF.
 107 * If the socket data length is greater than 7, PRMDATA can be used for
 108 * special notifications (see iucv_sock_shutdown); in that case the
 109 * function returns 8.
110 *
111 * Use this function to allocate socket buffers to store iucv message data.
112 */
113static inline size_t iucv_msg_length(struct iucv_message *msg)
114{
115 size_t datalen;
116
117 if (msg->flags & IUCV_IPRMDATA) {
118 datalen = 0xff - msg->rmmsg[7];
119 return (datalen < 8) ? datalen : 8;
120 }
121 return msg->length;
122}
123
81/* Timers */ 124/* Timers */
82static void iucv_sock_timeout(unsigned long arg) 125static void iucv_sock_timeout(unsigned long arg)
83{ 126{
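To make the PRMDATA convention above concrete: a 5-byte payload is sent with PRMDATA[7] = 0xff - 5 = 0xfa, and the receiver recovers the length as 0xff - 0xfa = 5; anything that decodes above 7 (for example the iprm_shutdown pattern, whose last byte 0x01 decodes to 0xfe) is clamped to 8 and treated as a special notification. An illustrative encode/decode pair, not part of the patch:

#include <linux/types.h>

static inline void example_iprm_set_len(u8 prmdata[8], u8 datalen) /* datalen <= 7 */
{
	prmdata[7] = 0xff - datalen;		/* e.g. 5 -> 0xfa */
}

static inline u8 example_iprm_get_len(const u8 prmdata[8])
{
	u8 datalen = 0xff - prmdata[7];		/* e.g. 0xfa -> 5 */

	return (datalen < 8) ? datalen : 8;	/* > 7 flags a special notification */
}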
@@ -225,6 +268,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
225 spin_lock_init(&iucv_sk(sk)->message_q.lock); 268 spin_lock_init(&iucv_sk(sk)->message_q.lock);
226 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 269 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
227 iucv_sk(sk)->send_tag = 0; 270 iucv_sk(sk)->send_tag = 0;
271 iucv_sk(sk)->flags = 0;
272 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
228 iucv_sk(sk)->path = NULL; 273 iucv_sk(sk)->path = NULL;
229 memset(&iucv_sk(sk)->src_user_id , 0, 32); 274 memset(&iucv_sk(sk)->src_user_id , 0, 32);
230 275
@@ -248,11 +293,22 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
248{ 293{
249 struct sock *sk; 294 struct sock *sk;
250 295
251 if (sock->type != SOCK_STREAM) 296 if (protocol && protocol != PF_IUCV)
252 return -ESOCKTNOSUPPORT; 297 return -EPROTONOSUPPORT;
253 298
254 sock->state = SS_UNCONNECTED; 299 sock->state = SS_UNCONNECTED;
255 sock->ops = &iucv_sock_ops; 300
301 switch (sock->type) {
302 case SOCK_STREAM:
303 sock->ops = &iucv_sock_ops;
304 break;
305 case SOCK_SEQPACKET:
306 /* currently, proto ops can handle both sk types */
307 sock->ops = &iucv_sock_ops;
308 break;
309 default:
310 return -ESOCKTNOSUPPORT;
311 }
256 312
257 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); 313 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
258 if (!sk) 314 if (!sk)
@@ -463,11 +519,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
463 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) 519 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
464 return -EBADFD; 520 return -EBADFD;
465 521
466 if (sk->sk_type != SOCK_STREAM) 522 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
467 return -EINVAL; 523 return -EINVAL;
468 524
469 iucv = iucv_sk(sk);
470
471 if (sk->sk_state == IUCV_OPEN) { 525 if (sk->sk_state == IUCV_OPEN) {
472 err = iucv_sock_autobind(sk); 526 err = iucv_sock_autobind(sk);
473 if (unlikely(err)) 527 if (unlikely(err))
@@ -486,8 +540,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
486 540
487 iucv = iucv_sk(sk); 541 iucv = iucv_sk(sk);
488 /* Create path. */ 542 /* Create path. */
489 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT, 543 iucv->path = iucv_path_alloc(iucv->msglimit,
490 IPRMDATA, GFP_KERNEL); 544 IUCV_IPRMDATA, GFP_KERNEL);
491 if (!iucv->path) { 545 if (!iucv->path) {
492 err = -ENOMEM; 546 err = -ENOMEM;
493 goto done; 547 goto done;
@@ -521,8 +575,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
521 } 575 }
522 576
523 if (sk->sk_state == IUCV_DISCONN) { 577 if (sk->sk_state == IUCV_DISCONN) {
524 release_sock(sk); 578 err = -ECONNREFUSED;
525 return -ECONNREFUSED;
526 } 579 }
527 580
528 if (err) { 581 if (err) {
@@ -545,7 +598,10 @@ static int iucv_sock_listen(struct socket *sock, int backlog)
545 lock_sock(sk); 598 lock_sock(sk);
546 599
547 err = -EINVAL; 600 err = -EINVAL;
548 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM) 601 if (sk->sk_state != IUCV_BOUND)
602 goto done;
603
604 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
549 goto done; 605 goto done;
550 606
551 sk->sk_max_ack_backlog = backlog; 607 sk->sk_max_ack_backlog = backlog;
@@ -636,6 +692,30 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
636 return 0; 692 return 0;
637} 693}
638 694
695/**
696 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
697 * @path: IUCV path
698 * @msg: Pointer to a struct iucv_message
699 * @skb: The socket data to send, skb->len MUST BE <= 7
700 *
 701 * Send the socket data in the parameter list of the iucv message
 702 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 703 * list and the socket data length at index 7 (last byte).
704 * See also iucv_msg_length().
705 *
706 * Returns the error code from the iucv_message_send() call.
707 */
708static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
709 struct sk_buff *skb)
710{
711 u8 prmdata[8];
712
713 memcpy(prmdata, (void *) skb->data, skb->len);
714 prmdata[7] = 0xff - (u8) skb->len;
715 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
716 (void *) prmdata, 8);
717}
718
639static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, 719static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
640 struct msghdr *msg, size_t len) 720 struct msghdr *msg, size_t len)
641{ 721{
@@ -643,6 +723,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
643 struct iucv_sock *iucv = iucv_sk(sk); 723 struct iucv_sock *iucv = iucv_sk(sk);
644 struct sk_buff *skb; 724 struct sk_buff *skb;
645 struct iucv_message txmsg; 725 struct iucv_message txmsg;
726 struct cmsghdr *cmsg;
727 int cmsg_done;
646 char user_id[9]; 728 char user_id[9];
647 char appl_id[9]; 729 char appl_id[9];
648 int err; 730 int err;
@@ -654,6 +736,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
654 if (msg->msg_flags & MSG_OOB) 736 if (msg->msg_flags & MSG_OOB)
655 return -EOPNOTSUPP; 737 return -EOPNOTSUPP;
656 738
739 /* SOCK_SEQPACKET: we do not support segmented records */
740 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
741 return -EOPNOTSUPP;
742
657 lock_sock(sk); 743 lock_sock(sk);
658 744
659 if (sk->sk_shutdown & SEND_SHUTDOWN) { 745 if (sk->sk_shutdown & SEND_SHUTDOWN) {
@@ -662,6 +748,52 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
662 } 748 }
663 749
664 if (sk->sk_state == IUCV_CONNECTED) { 750 if (sk->sk_state == IUCV_CONNECTED) {
751 /* initialize defaults */
752 cmsg_done = 0; /* check for duplicate headers */
753 txmsg.class = 0;
754
755 /* iterate over control messages */
756 for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
757 cmsg = CMSG_NXTHDR(msg, cmsg)) {
758
759 if (!CMSG_OK(msg, cmsg)) {
760 err = -EINVAL;
761 goto out;
762 }
763
764 if (cmsg->cmsg_level != SOL_IUCV)
765 continue;
766
767 if (cmsg->cmsg_type & cmsg_done) {
768 err = -EINVAL;
769 goto out;
770 }
771 cmsg_done |= cmsg->cmsg_type;
772
773 switch (cmsg->cmsg_type) {
774 case SCM_IUCV_TRGCLS:
775 if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
776 err = -EINVAL;
777 goto out;
778 }
779
780 /* set iucv message target class */
781 memcpy(&txmsg.class,
782 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
783
784 break;
785
786 default:
787 err = -EINVAL;
788 goto out;
789 break;
790 }
791 }
792
793 /* allocate one skb for each iucv message:
794 * this is fine for SOCK_SEQPACKET (unless we want to support
795 * segmented records using the MSG_EOR flag), but
796 * for SOCK_STREAM we might want to improve it in future */
665 if (!(skb = sock_alloc_send_skb(sk, len, 797 if (!(skb = sock_alloc_send_skb(sk, len,
666 msg->msg_flags & MSG_DONTWAIT, 798 msg->msg_flags & MSG_DONTWAIT,
667 &err))) 799 &err)))
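With the control-message parsing added above, userspace can pick the IUCV target class per message through an SCM_IUCV_TRGCLS ancillary header. A hedged userspace sketch: the SOL_IUCV and SCM_IUCV_TRGCLS values are assumptions taken from the corresponding headers of this series, and SOCK_SEQPACKET callers additionally have to pass MSG_EOR per the check introduced earlier in sendmsg():

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>

#ifndef SOL_IUCV
#define SOL_IUCV	277		/* assumed, see linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS	0x0001		/* assumed, see af_iucv.h */
#endif

static ssize_t iucv_send_with_trgcls(int fd, const void *buf, size_t len,
				     uint32_t trgcls, int flags)
{
	char cbuf[CMSG_SPACE(sizeof(trgcls))];
	struct iovec iov = { .iov_base = (void *) buf, .iov_len = len };
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* one 4-byte SCM_IUCV_TRGCLS header carrying the target class */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_IUCV;
	cmsg->cmsg_type = SCM_IUCV_TRGCLS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(trgcls));
	memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));

	return sendmsg(fd, &msg, flags);
}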
@@ -672,13 +804,33 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
672 goto fail; 804 goto fail;
673 } 805 }
674 806
675 txmsg.class = 0; 807 /* increment and save iucv message tag for msg_completion cbk */
676 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
677 txmsg.tag = iucv->send_tag++; 808 txmsg.tag = iucv->send_tag++;
678 memcpy(skb->cb, &txmsg.tag, 4); 809 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
679 skb_queue_tail(&iucv->send_skb_q, skb); 810 skb_queue_tail(&iucv->send_skb_q, skb);
680 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 811
681 (void *) skb->data, skb->len); 812 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
813 && skb->len <= 7) {
814 err = iucv_send_iprm(iucv->path, &txmsg, skb);
815
816 /* on success: there is no message_complete callback
817 * for an IPRMDATA msg; remove skb from send queue */
818 if (err == 0) {
819 skb_unlink(skb, &iucv->send_skb_q);
820 kfree_skb(skb);
821 }
822
823 /* this error should never happen since the
824 * IUCV_IPRMDATA path flag is set... sever path */
825 if (err == 0x15) {
826 iucv_path_sever(iucv->path, NULL);
827 skb_unlink(skb, &iucv->send_skb_q);
828 err = -EPIPE;
829 goto fail;
830 }
831 } else
832 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
833 (void *) skb->data, skb->len);
682 if (err) { 834 if (err) {
683 if (err == 3) { 835 if (err == 3) {
684 user_id[8] = 0; 836 user_id[8] = 0;
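
The hunks above let an application attach the IUCV target class to each outgoing message as ancillary data; the kernel copies it into txmsg.class before calling iucv_message_send() or iucv_send_iprm(). A minimal userspace sketch of such a call follows. The SOL_IUCV and SCM_IUCV_TRGCLS values are assumptions taken from the socket/af_iucv headers touched elsewhere in this series (they are not part of the quoted hunks), and the connected AF_IUCV descriptor 'fd' is taken as given.

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	#define SOL_IUCV	277	/* assumed value, see include/linux/socket.h */
	#define SCM_IUCV_TRGCLS	0x0001	/* assumed value, see af_iucv.h */

	/* Send one record on a connected AF_IUCV socket and tag it with an
	 * IUCV target class carried in a SCM_IUCV_TRGCLS control message. */
	static ssize_t send_with_trgcls(int fd, const void *data, size_t len,
					unsigned int trgcls)
	{
		struct iovec iov = { .iov_base = (void *) data, .iov_len = len };
		char cbuf[CMSG_SPACE(sizeof(trgcls))];
		struct msghdr msg;
		struct cmsghdr *cmsg;

		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_IUCV;
		cmsg->cmsg_type = SCM_IUCV_TRGCLS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(trgcls));	/* must match TRGCLS_SIZE */
		memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));

		/* MSG_EOR: SOCK_SEQPACKET sockets reject segmented records */
		return sendmsg(fd, &msg, MSG_EOR);
	}
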
@@ -725,6 +877,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
725 if (!nskb) 877 if (!nskb)
726 return -ENOMEM; 878 return -ENOMEM;
727 879
880 /* copy target class to control buffer of new skb */
881 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
882
883 /* copy data fragment */
728 memcpy(nskb->data, skb->data + copied, size); 884 memcpy(nskb->data, skb->data + copied, size);
729 copied += size; 885 copied += size;
730 dataleft -= size; 886 dataleft -= size;
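
The CB_TAG()/CB_TRGCLS() helpers used here and in the sendmsg hunk are defined outside the quoted context. From the comments in this patch ("the first 4 bytes are reserved for msg tag", target class stored "in the second 4 bytes of skb ctrl buffer"), the skb->cb layout they describe is roughly the sketch below; treat it as an illustration of that layout, not a quote of the actual definitions.

	/* Sketch of the skb control-buffer layout implied by this patch:
	 * bytes 0..3: iucv message tag   (matched in iucv_callback_txdone)
	 * bytes 4..7: iucv target class  (returned to userspace as SCM_IUCV_TRGCLS)
	 */
	#define CB_TAG(skb)	((skb)->cb)			/* iucv message tag */
	#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))

	#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN)	/* iucv msg target class */
	#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
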
@@ -744,19 +900,33 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
744 struct iucv_message *msg) 900 struct iucv_message *msg)
745{ 901{
746 int rc; 902 int rc;
903 unsigned int len;
904
905 len = iucv_msg_length(msg);
747 906
748 if (msg->flags & IPRMDATA) { 907 /* store msg target class in the second 4 bytes of skb ctrl buffer */
749 skb->data = NULL; 908 /* Note: the first 4 bytes are reserved for msg tag */
750 skb->len = 0; 909 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
910
911 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
912 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
913 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
914 skb->data = NULL;
915 skb->len = 0;
916 }
751 } else { 917 } else {
752 rc = iucv_message_receive(path, msg, 0, skb->data, 918 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
753 msg->length, NULL); 919 skb->data, len, NULL);
754 if (rc) { 920 if (rc) {
755 kfree_skb(skb); 921 kfree_skb(skb);
756 return; 922 return;
757 } 923 }
758 if (skb->truesize >= sk->sk_rcvbuf / 4) { 924 /* we need to fragment iucv messages for SOCK_STREAM only;
759 rc = iucv_fragment_skb(sk, skb, msg->length); 925 * for SOCK_SEQPACKET, it is only relevant if we support
926 * record segmentation using MSG_EOR (see also recvmsg()) */
927 if (sk->sk_type == SOCK_STREAM &&
928 skb->truesize >= sk->sk_rcvbuf / 4) {
929 rc = iucv_fragment_skb(sk, skb, len);
760 kfree_skb(skb); 930 kfree_skb(skb);
761 skb = NULL; 931 skb = NULL;
762 if (rc) { 932 if (rc) {
@@ -767,7 +937,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
767 } else { 937 } else {
768 skb_reset_transport_header(skb); 938 skb_reset_transport_header(skb);
769 skb_reset_network_header(skb); 939 skb_reset_network_header(skb);
770 skb->len = msg->length; 940 skb->len = len;
771 } 941 }
772 } 942 }
773 943
@@ -782,7 +952,7 @@ static void iucv_process_message_q(struct sock *sk)
782 struct sock_msg_q *p, *n; 952 struct sock_msg_q *p, *n;
783 953
784 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 954 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
785 skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); 955 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
786 if (!skb) 956 if (!skb)
787 break; 957 break;
788 iucv_process_message(sk, skb, p->path, &p->msg); 958 iucv_process_message(sk, skb, p->path, &p->msg);
@@ -799,7 +969,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
799 int noblock = flags & MSG_DONTWAIT; 969 int noblock = flags & MSG_DONTWAIT;
800 struct sock *sk = sock->sk; 970 struct sock *sk = sock->sk;
801 struct iucv_sock *iucv = iucv_sk(sk); 971 struct iucv_sock *iucv = iucv_sk(sk);
802 int target, copied = 0; 972 unsigned int copied, rlen;
803 struct sk_buff *skb, *rskb, *cskb; 973 struct sk_buff *skb, *rskb, *cskb;
804 int err = 0; 974 int err = 0;
805 975
@@ -812,8 +982,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
812 if (flags & (MSG_OOB)) 982 if (flags & (MSG_OOB))
813 return -EOPNOTSUPP; 983 return -EOPNOTSUPP;
814 984
815 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
816
817 /* receive/dequeue next skb: 985 /* receive/dequeue next skb:
818 * the function understands MSG_PEEK and, thus, does not dequeue skb */ 986 * the function understands MSG_PEEK and, thus, does not dequeue skb */
819 skb = skb_recv_datagram(sk, flags, noblock, &err); 987 skb = skb_recv_datagram(sk, flags, noblock, &err);
@@ -823,25 +991,45 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
823 return err; 991 return err;
824 } 992 }
825 993
826 copied = min_t(unsigned int, skb->len, len); 994 rlen = skb->len; /* real length of skb */
995 copied = min_t(unsigned int, rlen, len);
827 996
828 cskb = skb; 997 cskb = skb;
829 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 998 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
830 skb_queue_head(&sk->sk_receive_queue, skb); 999 if (!(flags & MSG_PEEK))
831 if (copied == 0) 1000 skb_queue_head(&sk->sk_receive_queue, skb);
832 return -EFAULT; 1001 return -EFAULT;
833 goto done;
834 } 1002 }
835 1003
836 len -= copied; 1004 /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1005 if (sk->sk_type == SOCK_SEQPACKET) {
1006 if (copied < rlen)
1007 msg->msg_flags |= MSG_TRUNC;
1008 /* each iucv message contains a complete record */
1009 msg->msg_flags |= MSG_EOR;
1010 }
1011
1012 /* create control message to store iucv msg target class:
1013 * get the trgcls from the control buffer of the skb due to
1014 * fragmentation of original iucv message. */
1015 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1016 CB_TRGCLS_LEN, CB_TRGCLS(skb));
1017 if (err) {
1018 if (!(flags & MSG_PEEK))
1019 skb_queue_head(&sk->sk_receive_queue, skb);
1020 return err;
1021 }
837 1022
838 /* Mark read part of skb as used */ 1023 /* Mark read part of skb as used */
839 if (!(flags & MSG_PEEK)) { 1024 if (!(flags & MSG_PEEK)) {
840 skb_pull(skb, copied);
841 1025
842 if (skb->len) { 1026 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
843 skb_queue_head(&sk->sk_receive_queue, skb); 1027 if (sk->sk_type == SOCK_STREAM) {
844 goto done; 1028 skb_pull(skb, copied);
1029 if (skb->len) {
1030 skb_queue_head(&sk->sk_receive_queue, skb);
1031 goto done;
1032 }
845 } 1033 }
846 1034
847 kfree_skb(skb); 1035 kfree_skb(skb);
@@ -866,7 +1054,11 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
866 } 1054 }
867 1055
868done: 1056done:
869 return err ? : copied; 1057 /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1058 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1059 copied = rlen;
1060
1061 return copied;
870} 1062}
871 1063
872static inline unsigned int iucv_accept_poll(struct sock *parent) 1064static inline unsigned int iucv_accept_poll(struct sock *parent)
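
On the receive path the target class is handed back to userspace through the same SCM_IUCV_TRGCLS control message, and the SOCK_SEQPACKET changes above give datagram-like semantics: MSG_EOR is always set, MSG_TRUNC is raised in msg_flags when the record did not fit, and passing MSG_TRUNC in the recvmsg() flags makes the call return the real record length. A hedged userspace sketch, reusing the assumed SOL_IUCV/SCM_IUCV_TRGCLS values from the send example:

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	#define SOL_IUCV	277	/* assumed values, as in the send sketch */
	#define SCM_IUCV_TRGCLS	0x0001

	/* Receive one record and pick up its IUCV target class from the
	 * ancillary data. */
	static ssize_t recv_with_trgcls(int fd, void *buf, size_t len,
					unsigned int *trgcls)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		char cbuf[CMSG_SPACE(sizeof(*trgcls))];
		struct msghdr msg;
		struct cmsghdr *cmsg;
		ssize_t n;

		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);

		n = recvmsg(fd, &msg, 0);
		if (n < 0)
			return n;

		/* msg.msg_flags & MSG_TRUNC => the record was larger than 'len';
		 * calling recvmsg(fd, &msg, MSG_TRUNC) instead would return the
		 * full record length rather than the copied length. */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
			if (cmsg->cmsg_level == SOL_IUCV &&
			    cmsg->cmsg_type == SCM_IUCV_TRGCLS)
				memcpy(trgcls, CMSG_DATA(cmsg), sizeof(*trgcls));

		return n;
	}
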
@@ -928,7 +1120,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
928 struct iucv_sock *iucv = iucv_sk(sk); 1120 struct iucv_sock *iucv = iucv_sk(sk);
929 struct iucv_message txmsg; 1121 struct iucv_message txmsg;
930 int err = 0; 1122 int err = 0;
931 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
932 1123
933 how++; 1124 how++;
934 1125
@@ -953,7 +1144,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
953 txmsg.class = 0; 1144 txmsg.class = 0;
954 txmsg.tag = 0; 1145 txmsg.tag = 0;
955 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1146 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
956 (void *) prmmsg, 8); 1147 (void *) iprm_shutdown, 8);
957 if (err) { 1148 if (err) {
958 switch (err) { 1149 switch (err) {
959 case 1: 1150 case 1:
@@ -1007,6 +1198,98 @@ static int iucv_sock_release(struct socket *sock)
1007 return err; 1198 return err;
1008} 1199}
1009 1200
1201/* getsockopt and setsockopt */
1202static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1203 char __user *optval, int optlen)
1204{
1205 struct sock *sk = sock->sk;
1206 struct iucv_sock *iucv = iucv_sk(sk);
1207 int val;
1208 int rc;
1209
1210 if (level != SOL_IUCV)
1211 return -ENOPROTOOPT;
1212
1213 if (optlen < sizeof(int))
1214 return -EINVAL;
1215
1216 if (get_user(val, (int __user *) optval))
1217 return -EFAULT;
1218
1219 rc = 0;
1220
1221 lock_sock(sk);
1222 switch (optname) {
1223 case SO_IPRMDATA_MSG:
1224 if (val)
1225 iucv->flags |= IUCV_IPRMDATA;
1226 else
1227 iucv->flags &= ~IUCV_IPRMDATA;
1228 break;
1229 case SO_MSGLIMIT:
1230 switch (sk->sk_state) {
1231 case IUCV_OPEN:
1232 case IUCV_BOUND:
1233 if (val < 1 || val > (u16)(~0))
1234 rc = -EINVAL;
1235 else
1236 iucv->msglimit = val;
1237 break;
1238 default:
1239 rc = -EINVAL;
1240 break;
1241 }
1242 break;
1243 default:
1244 rc = -ENOPROTOOPT;
1245 break;
1246 }
1247 release_sock(sk);
1248
1249 return rc;
1250}
1251
1252static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1253 char __user *optval, int __user *optlen)
1254{
1255 struct sock *sk = sock->sk;
1256 struct iucv_sock *iucv = iucv_sk(sk);
1257 int val, len;
1258
1259 if (level != SOL_IUCV)
1260 return -ENOPROTOOPT;
1261
1262 if (get_user(len, optlen))
1263 return -EFAULT;
1264
1265 if (len < 0)
1266 return -EINVAL;
1267
1268 len = min_t(unsigned int, len, sizeof(int));
1269
1270 switch (optname) {
1271 case SO_IPRMDATA_MSG:
1272 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1273 break;
1274 case SO_MSGLIMIT:
1275 lock_sock(sk);
1276 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1277 : iucv->msglimit; /* default */
1278 release_sock(sk);
1279 break;
1280 default:
1281 return -ENOPROTOOPT;
1282 }
1283
1284 if (put_user(len, optlen))
1285 return -EFAULT;
1286 if (copy_to_user(optval, &val, len))
1287 return -EFAULT;
1288
1289 return 0;
1290}
1291
1292
1010/* Callback wrappers - called from iucv base support */ 1293/* Callback wrappers - called from iucv base support */
1011static int iucv_callback_connreq(struct iucv_path *path, 1294static int iucv_callback_connreq(struct iucv_path *path,
1012 u8 ipvmid[8], u8 ipuser[16]) 1295 u8 ipvmid[8], u8 ipuser[16])
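
The new SOL_IUCV socket options are plain integer options: SO_IPRMDATA_MSG toggles IUCV_IPRMDATA so that payloads of up to 7 bytes travel in the IUCV parameter list, and SO_MSGLIMIT (1..65535) is only accepted while the socket is still in IUCV_OPEN or IUCV_BOUND state, i.e. before connect()/listen(); once a path exists, getsockopt(SO_MSGLIMIT) reports the negotiated path->msglim. A short usage sketch; the option values are assumptions taken from the af_iucv header, not from this hunk:

	#include <sys/socket.h>

	#define SOL_IUCV	277	/* assumed values, see the af_iucv headers */
	#define SO_IPRMDATA_MSG	0x0080
	#define SO_MSGLIMIT	0x1000

	/* Tune a not-yet-connected AF_IUCV socket: allow in-parameter-list
	 * transfer of small messages and request a larger path message limit. */
	static int tune_iucv_socket(int fd)
	{
		int one = 1;
		int limit = 128;	/* accepted range is 1..65535 while OPEN/BOUND */

		if (setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &one, sizeof(one)))
			return -1;
		return setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
	}
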
@@ -1060,7 +1343,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1060 } 1343 }
1061 1344
1062 /* Create the new socket */ 1345 /* Create the new socket */
1063 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); 1346 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1064 if (!nsk) { 1347 if (!nsk) {
1065 err = iucv_path_sever(path, user_data); 1348 err = iucv_path_sever(path, user_data);
1066 iucv_path_free(path); 1349 iucv_path_free(path);
@@ -1083,7 +1366,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1083 memcpy(nuser_data + 8, niucv->src_name, 8); 1366 memcpy(nuser_data + 8, niucv->src_name, 8);
1084 ASCEBC(nuser_data + 8, 8); 1367 ASCEBC(nuser_data + 8, 8);
1085 1368
1086 path->msglim = IUCV_QUEUELEN_DEFAULT; 1369 /* set message limit for path based on msglimit of accepting socket */
1370 niucv->msglimit = iucv->msglimit;
1371 path->msglim = iucv->msglimit;
1087 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1372 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
1088 if (err) { 1373 if (err) {
1089 err = iucv_path_sever(path, user_data); 1374 err = iucv_path_sever(path, user_data);
@@ -1131,19 +1416,17 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1131 goto save_message; 1416 goto save_message;
1132 1417
1133 len = atomic_read(&sk->sk_rmem_alloc); 1418 len = atomic_read(&sk->sk_rmem_alloc);
1134 len += msg->length + sizeof(struct sk_buff); 1419 len += iucv_msg_length(msg) + sizeof(struct sk_buff);
1135 if (len > sk->sk_rcvbuf) 1420 if (len > sk->sk_rcvbuf)
1136 goto save_message; 1421 goto save_message;
1137 1422
1138 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); 1423 skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1139 if (!skb) 1424 if (!skb)
1140 goto save_message; 1425 goto save_message;
1141 1426
1142 iucv_process_message(sk, skb, path, msg); 1427 iucv_process_message(sk, skb, path, msg);
1143 goto out_unlock; 1428 goto out_unlock;
1144 1429
1145 return;
1146
1147save_message: 1430save_message:
1148 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); 1431 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1149 if (!save_msg) 1432 if (!save_msg)
@@ -1170,7 +1453,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1170 spin_lock_irqsave(&list->lock, flags); 1453 spin_lock_irqsave(&list->lock, flags);
1171 1454
1172 while (list_skb != (struct sk_buff *)list) { 1455 while (list_skb != (struct sk_buff *)list) {
1173 if (!memcmp(&msg->tag, list_skb->cb, 4)) { 1456 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1174 this = list_skb; 1457 this = list_skb;
1175 break; 1458 break;
1176 } 1459 }
@@ -1206,6 +1489,21 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1206 sk->sk_state_change(sk); 1489 sk->sk_state_change(sk);
1207} 1490}
1208 1491
1492/* called if the other communication side shuts down its RECV direction;
1493 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1494 */
1495static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1496{
1497 struct sock *sk = path->private;
1498
1499 bh_lock_sock(sk);
1500 if (sk->sk_state != IUCV_CLOSED) {
1501 sk->sk_shutdown |= SEND_SHUTDOWN;
1502 sk->sk_state_change(sk);
1503 }
1504 bh_unlock_sock(sk);
1505}
1506
1209static struct proto_ops iucv_sock_ops = { 1507static struct proto_ops iucv_sock_ops = {
1210 .family = PF_IUCV, 1508 .family = PF_IUCV,
1211 .owner = THIS_MODULE, 1509 .owner = THIS_MODULE,
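
Together with the RCV_SHUTDOWN handling in iucv_sock_shutdown(), the new callback gives shutdown() end-to-end meaning: when the peer gives up its receive direction, the local socket gets SEND_SHUTDOWN and further sends fail with EPIPE. A rough userspace illustration follows; the two descriptors would normally live on different z/VM guests and the notification is asynchronous, so this is a sketch of the semantics rather than a reliable test.

	#include <errno.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* 'peer_fd' and 'fd' are the two ends of an established AF_IUCV
	 * connection (setup not shown; assumption for illustration). */
	static void demo_send_shutdown(int peer_fd, int fd)
	{
		/* peer stops receiving; the IUCV path is quiesced */
		shutdown(peer_fd, SHUT_RD);

		/* iucv_callback_shutdown() sets SEND_SHUTDOWN on the other end,
		 * so (once the notification has arrived) sends are rejected */
		if (send(fd, "x", 1, 0) < 0 && errno == EPIPE)
			write(2, "peer stopped receiving\n", 23);
	}
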
@@ -1222,8 +1520,8 @@ static struct proto_ops iucv_sock_ops = {
1222 .mmap = sock_no_mmap, 1520 .mmap = sock_no_mmap,
1223 .socketpair = sock_no_socketpair, 1521 .socketpair = sock_no_socketpair,
1224 .shutdown = iucv_sock_shutdown, 1522 .shutdown = iucv_sock_shutdown,
1225 .setsockopt = sock_no_setsockopt, 1523 .setsockopt = iucv_sock_setsockopt,
1226 .getsockopt = sock_no_getsockopt 1524 .getsockopt = iucv_sock_getsockopt,
1227}; 1525};
1228 1526
1229static struct net_proto_family iucv_sock_family_ops = { 1527static struct net_proto_family iucv_sock_family_ops = {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a35240f61ec3..61e8038a55ee 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -280,6 +280,7 @@ union iucv_param {
280 * Anchor for per-cpu IUCV command parameter block. 280 * Anchor for per-cpu IUCV command parameter block.
281 */ 281 */
282static union iucv_param *iucv_param[NR_CPUS]; 282static union iucv_param *iucv_param[NR_CPUS];
283static union iucv_param *iucv_param_irq[NR_CPUS];
283 284
284/** 285/**
285 * iucv_call_b2f0 286 * iucv_call_b2f0
@@ -358,7 +359,7 @@ static void iucv_allow_cpu(void *data)
358 * 0x10 - Flag to allow priority message completion interrupts 359 * 0x10 - Flag to allow priority message completion interrupts
359 * 0x08 - Flag to allow IUCV control interrupts 360 * 0x08 - Flag to allow IUCV control interrupts
360 */ 361 */
361 parm = iucv_param[cpu]; 362 parm = iucv_param_irq[cpu];
362 memset(parm, 0, sizeof(union iucv_param)); 363 memset(parm, 0, sizeof(union iucv_param));
363 parm->set_mask.ipmask = 0xf8; 364 parm->set_mask.ipmask = 0xf8;
364 iucv_call_b2f0(IUCV_SETMASK, parm); 365 iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -379,7 +380,7 @@ static void iucv_block_cpu(void *data)
379 union iucv_param *parm; 380 union iucv_param *parm;
380 381
381 /* Disable all iucv interrupts. */ 382 /* Disable all iucv interrupts. */
382 parm = iucv_param[cpu]; 383 parm = iucv_param_irq[cpu];
383 memset(parm, 0, sizeof(union iucv_param)); 384 memset(parm, 0, sizeof(union iucv_param));
384 iucv_call_b2f0(IUCV_SETMASK, parm); 385 iucv_call_b2f0(IUCV_SETMASK, parm);
385 386
@@ -403,7 +404,7 @@ static void iucv_declare_cpu(void *data)
403 return; 404 return;
404 405
405 /* Declare interrupt buffer. */ 406 /* Declare interrupt buffer. */
406 parm = iucv_param[cpu]; 407 parm = iucv_param_irq[cpu];
407 memset(parm, 0, sizeof(union iucv_param)); 408 memset(parm, 0, sizeof(union iucv_param));
408 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 409 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
409 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 410 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
@@ -460,7 +461,7 @@ static void iucv_retrieve_cpu(void *data)
460 iucv_block_cpu(NULL); 461 iucv_block_cpu(NULL);
461 462
462 /* Retrieve interrupt buffer. */ 463 /* Retrieve interrupt buffer. */
463 parm = iucv_param[cpu]; 464 parm = iucv_param_irq[cpu];
464 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 465 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
465 466
466 /* Clear indication that an iucv buffer exists for this cpu. */ 467 /* Clear indication that an iucv buffer exists for this cpu. */
@@ -574,11 +575,22 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
574 iucv_irq_data[cpu] = NULL; 575 iucv_irq_data[cpu] = NULL;
575 return NOTIFY_BAD; 576 return NOTIFY_BAD;
576 } 577 }
578 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
579 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
580 if (!iucv_param_irq[cpu]) {
581 kfree(iucv_param[cpu]);
582 iucv_param[cpu] = NULL;
583 kfree(iucv_irq_data[cpu]);
584 iucv_irq_data[cpu] = NULL;
585 return NOTIFY_BAD;
586 }
577 break; 587 break;
578 case CPU_UP_CANCELED: 588 case CPU_UP_CANCELED:
579 case CPU_UP_CANCELED_FROZEN: 589 case CPU_UP_CANCELED_FROZEN:
580 case CPU_DEAD: 590 case CPU_DEAD:
581 case CPU_DEAD_FROZEN: 591 case CPU_DEAD_FROZEN:
592 kfree(iucv_param_irq[cpu]);
593 iucv_param_irq[cpu] = NULL;
582 kfree(iucv_param[cpu]); 594 kfree(iucv_param[cpu]);
583 iucv_param[cpu] = NULL; 595 iucv_param[cpu] = NULL;
584 kfree(iucv_irq_data[cpu]); 596 kfree(iucv_irq_data[cpu]);
@@ -625,7 +637,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
625{ 637{
626 union iucv_param *parm; 638 union iucv_param *parm;
627 639
628 parm = iucv_param[smp_processor_id()]; 640 parm = iucv_param_irq[smp_processor_id()];
629 memset(parm, 0, sizeof(union iucv_param)); 641 memset(parm, 0, sizeof(union iucv_param));
630 if (userdata) 642 if (userdata)
631 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 643 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -918,10 +930,8 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
918 if (iucv_active_cpu != smp_processor_id()) 930 if (iucv_active_cpu != smp_processor_id())
919 spin_lock_bh(&iucv_table_lock); 931 spin_lock_bh(&iucv_table_lock);
920 rc = iucv_sever_pathid(path->pathid, userdata); 932 rc = iucv_sever_pathid(path->pathid, userdata);
921 if (!rc) { 933 iucv_path_table[path->pathid] = NULL;
922 iucv_path_table[path->pathid] = NULL; 934 list_del_init(&path->list);
923 list_del_init(&path->list);
924 }
925 if (iucv_active_cpu != smp_processor_id()) 935 if (iucv_active_cpu != smp_processor_id())
926 spin_unlock_bh(&iucv_table_lock); 936 spin_unlock_bh(&iucv_table_lock);
927 preempt_enable(); 937 preempt_enable();
@@ -1378,6 +1388,8 @@ static void iucv_path_complete(struct iucv_irq_data *data)
1378 struct iucv_path_complete *ipc = (void *) data; 1388 struct iucv_path_complete *ipc = (void *) data;
1379 struct iucv_path *path = iucv_path_table[ipc->ippathid]; 1389 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1380 1390
1391 if (path)
1392 path->flags = ipc->ipflags1;
1381 if (path && path->handler && path->handler->path_complete) 1393 if (path && path->handler && path->handler->path_complete)
1382 path->handler->path_complete(path, ipc->ipuser); 1394 path->handler->path_complete(path, ipc->ipuser);
1383} 1395}
@@ -1413,7 +1425,7 @@ static void iucv_path_severed(struct iucv_irq_data *data)
1413 else { 1425 else {
1414 iucv_sever_pathid(path->pathid, NULL); 1426 iucv_sever_pathid(path->pathid, NULL);
1415 iucv_path_table[path->pathid] = NULL; 1427 iucv_path_table[path->pathid] = NULL;
1416 list_del_init(&path->list); 1428 list_del(&path->list);
1417 iucv_path_free(path); 1429 iucv_path_free(path);
1418 } 1430 }
1419} 1431}
@@ -1717,6 +1729,13 @@ static int __init iucv_init(void)
1717 rc = -ENOMEM; 1729 rc = -ENOMEM;
1718 goto out_free; 1730 goto out_free;
1719 } 1731 }
1732 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
1733 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
1734 if (!iucv_param_irq[cpu]) {
1735 rc = -ENOMEM;
1736 goto out_free;
1737 }
1738
1720 } 1739 }
1721 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 1740 rc = register_hotcpu_notifier(&iucv_cpu_notifier);
1722 if (rc) 1741 if (rc)
@@ -1734,6 +1753,8 @@ out_cpu:
1734 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1753 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1735out_free: 1754out_free:
1736 for_each_possible_cpu(cpu) { 1755 for_each_possible_cpu(cpu) {
1756 kfree(iucv_param_irq[cpu]);
1757 iucv_param_irq[cpu] = NULL;
1737 kfree(iucv_param[cpu]); 1758 kfree(iucv_param[cpu]);
1738 iucv_param[cpu] = NULL; 1759 iucv_param[cpu] = NULL;
1739 kfree(iucv_irq_data[cpu]); 1760 kfree(iucv_irq_data[cpu]);
@@ -1764,6 +1785,8 @@ static void __exit iucv_exit(void)
1764 spin_unlock_irq(&iucv_queue_lock); 1785 spin_unlock_irq(&iucv_queue_lock);
1765 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1786 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1766 for_each_possible_cpu(cpu) { 1787 for_each_possible_cpu(cpu) {
1788 kfree(iucv_param_irq[cpu]);
1789 iucv_param_irq[cpu] = NULL;
1767 kfree(iucv_param[cpu]); 1790 kfree(iucv_param[cpu]);
1768 iucv_param[cpu] = NULL; 1791 iucv_param[cpu] = NULL;
1769 kfree(iucv_irq_data[cpu]); 1792 kfree(iucv_irq_data[cpu]);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index febae702685c..9208cf5f2bd5 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -935,7 +935,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
935 935
936 if (llc->dev) { 936 if (llc->dev) {
937 sllc.sllc_arphrd = llc->dev->type; 937 sllc.sllc_arphrd = llc->dev->type;
938 memcpy(&sllc.sllc_mac, &llc->dev->dev_addr, 938 memcpy(&sllc.sllc_mac, llc->dev->dev_addr,
939 IFHWADDRLEN); 939 IFHWADDRLEN);
940 } 940 }
941 } 941 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3477624a4906..c6bab39b018e 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -79,10 +79,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
79 79
80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { 80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
81 /* indicate or confirm not required */ 81 /* indicate or confirm not required */
82 /* XXX this is not very pretty, perhaps we should store
83 * XXX indicate/confirm-needed state in the llc_conn_state_ev
84 * XXX control block of the SKB instead? -DaveM
85 */
86 if (!skb->next) 82 if (!skb->next)
87 goto out_kfree_skb; 83 goto out_kfree_skb;
88 goto out_skb_put; 84 goto out_skb_put;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index ecc3faf9f11a..9cbf545e95a2 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -11,6 +11,22 @@ config MAC80211
11 This option enables the hardware independent IEEE 802.11 11 This option enables the hardware independent IEEE 802.11
12 networking stack. 12 networking stack.
13 13
14config MAC80211_DEFAULT_PS
15 bool "enable powersave by default"
16 depends on MAC80211
17 default y
18 help
19 This option enables powersave mode by default.
20
21 If this causes your applications to misbehave you should fix your
22 applications instead -- they need to register their network
23 latency requirement, see Documentation/power/pm_qos_interface.txt.
24
25config MAC80211_DEFAULT_PS_VALUE
26 int
27 default 1 if MAC80211_DEFAULT_PS
28 default 0
29
14menu "Rate control algorithm selection" 30menu "Rate control algorithm selection"
15 depends on MAC80211 != n 31 depends on MAC80211 != n
16 32
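
The help text above points latency-sensitive applications at the pm_qos interface. A minimal sketch of such a registration is given below; it assumes the /dev/network_latency misc device and the binary s32 write format described in Documentation/power/pm_qos_interface.txt, neither of which is part of this hunk.

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Request a network latency bound of 'usec' microseconds; the request
	 * stays in force for as long as the returned descriptor is kept open. */
	static int request_network_latency(int32_t usec)
	{
		int fd = open("/dev/network_latency", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, &usec, sizeof(usec)) != sizeof(usec)) {
			close(fd);
			return -1;
		}
		return fd;	/* close(fd) drops the requirement again */
	}
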
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 07656d830bc4..bc064d7933ff 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -16,12 +16,12 @@
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19 20
20void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 21void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
21 u16 initiator, u16 reason) 22 u16 initiator, u16 reason)
22{ 23{
23 struct ieee80211_local *local = sta->local; 24 struct ieee80211_local *local = sta->local;
24 struct ieee80211_hw *hw = &local->hw;
25 int i; 25 int i;
26 26
27 /* check if TID is in operational state */ 27 /* check if TID is in operational state */
@@ -41,8 +41,8 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, 44 if (drv_ampdu_action(local, IEEE80211_AMPDU_RX_STOP,
45 &sta->sta, tid, NULL)) 45 &sta->sta, tid, NULL))
46 printk(KERN_DEBUG "HW problem - can not stop rx " 46 printk(KERN_DEBUG "HW problem - can not stop rx "
47 "aggregation for tid %d\n", tid); 47 "aggregation for tid %d\n", tid);
48 48
@@ -68,6 +68,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
68 spin_lock_bh(&sta->lock); 68 spin_lock_bh(&sta->lock);
69 /* free resources */ 69 /* free resources */
70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); 70 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
71 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time);
71 72
72 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) { 73 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
73 kfree(sta->ampdu_mlme.tid_rx[tid]); 74 kfree(sta->ampdu_mlme.tid_rx[tid]);
@@ -268,19 +269,23 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
268 /* prepare reordering buffer */ 269 /* prepare reordering buffer */
269 tid_agg_rx->reorder_buf = 270 tid_agg_rx->reorder_buf =
270 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); 271 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
271 if (!tid_agg_rx->reorder_buf) { 272 tid_agg_rx->reorder_time =
273 kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC);
274 if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
272#ifdef CONFIG_MAC80211_HT_DEBUG 275#ifdef CONFIG_MAC80211_HT_DEBUG
273 if (net_ratelimit()) 276 if (net_ratelimit())
274 printk(KERN_ERR "can not allocate reordering buffer " 277 printk(KERN_ERR "can not allocate reordering buffer "
275 "to tid %d\n", tid); 278 "to tid %d\n", tid);
276#endif 279#endif
280 kfree(tid_agg_rx->reorder_buf);
281 kfree(tid_agg_rx->reorder_time);
277 kfree(sta->ampdu_mlme.tid_rx[tid]); 282 kfree(sta->ampdu_mlme.tid_rx[tid]);
283 sta->ampdu_mlme.tid_rx[tid] = NULL;
278 goto end; 284 goto end;
279 } 285 }
280 286
281 if (local->ops->ampdu_action) 287 ret = drv_ampdu_action(local, IEEE80211_AMPDU_RX_START,
282 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, 288 &sta->sta, tid, &start_seq_num);
283 &sta->sta, tid, &start_seq_num);
284#ifdef CONFIG_MAC80211_HT_DEBUG 289#ifdef CONFIG_MAC80211_HT_DEBUG
285 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 290 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
286#endif /* CONFIG_MAC80211_HT_DEBUG */ 291#endif /* CONFIG_MAC80211_HT_DEBUG */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 947aaaad35d2..43d00ffd3988 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -16,6 +16,7 @@
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19#include "wme.h" 20#include "wme.h"
20 21
21/** 22/**
@@ -134,8 +135,8 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
134 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 135 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
135 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 136 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
136 137
137 ret = local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_STOP, 138 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_STOP,
138 &sta->sta, tid, NULL); 139 &sta->sta, tid, NULL);
139 140
140 /* HW shall not deny going back to legacy */ 141 /* HW shall not deny going back to legacy */
141 if (WARN_ON(ret)) { 142 if (WARN_ON(ret)) {
@@ -306,8 +307,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
306 307
307 start_seq_num = sta->tid_seq[tid]; 308 start_seq_num = sta->tid_seq[tid];
308 309
309 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, 310 ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_START,
310 &sta->sta, tid, &start_seq_num); 311 &sta->sta, tid, &start_seq_num);
311 312
312 if (ret) { 313 if (ret) {
313#ifdef CONFIG_MAC80211_HT_DEBUG 314#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -418,8 +419,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
418 ieee80211_agg_splice_finish(local, sta, tid); 419 ieee80211_agg_splice_finish(local, sta, tid);
419 spin_unlock(&local->ampdu_lock); 420 spin_unlock(&local->ampdu_lock);
420 421
421 local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL, 422 drv_ampdu_action(local, IEEE80211_AMPDU_TX_OPERATIONAL,
422 &sta->sta, tid, NULL); 423 &sta->sta, tid, NULL);
423} 424}
424 425
425void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) 426void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e677b751d468..77e9ff5ec4f3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -13,6 +13,7 @@
13#include <linux/rcupdate.h> 13#include <linux/rcupdate.h>
14#include <net/cfg80211.h> 14#include <net/cfg80211.h>
15#include "ieee80211_i.h" 15#include "ieee80211_i.h"
16#include "driver-ops.h"
16#include "cfg.h" 17#include "cfg.h"
17#include "rate.h" 18#include "rate.h"
18#include "mesh.h" 19#include "mesh.h"
@@ -111,7 +112,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
111} 112}
112 113
113static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, 114static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
114 u8 key_idx, u8 *mac_addr, 115 u8 key_idx, const u8 *mac_addr,
115 struct key_params *params) 116 struct key_params *params)
116{ 117{
117 struct ieee80211_sub_if_data *sdata; 118 struct ieee80211_sub_if_data *sdata;
@@ -140,7 +141,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
140 return -EINVAL; 141 return -EINVAL;
141 } 142 }
142 143
143 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key); 144 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
145 params->seq_len, params->seq);
144 if (!key) 146 if (!key)
145 return -ENOMEM; 147 return -ENOMEM;
146 148
@@ -165,7 +167,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
165} 167}
166 168
167static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, 169static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
168 u8 key_idx, u8 *mac_addr) 170 u8 key_idx, const u8 *mac_addr)
169{ 171{
170 struct ieee80211_sub_if_data *sdata; 172 struct ieee80211_sub_if_data *sdata;
171 struct sta_info *sta; 173 struct sta_info *sta;
@@ -207,7 +209,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
207} 209}
208 210
209static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, 211static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
210 u8 key_idx, u8 *mac_addr, void *cookie, 212 u8 key_idx, const u8 *mac_addr, void *cookie,
211 void (*callback)(void *cookie, 213 void (*callback)(void *cookie,
212 struct key_params *params)) 214 struct key_params *params))
213{ 215{
@@ -245,12 +247,10 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
245 iv32 = key->u.tkip.tx.iv32; 247 iv32 = key->u.tkip.tx.iv32;
246 iv16 = key->u.tkip.tx.iv16; 248 iv16 = key->u.tkip.tx.iv16;
247 249
248 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 250 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
249 sdata->local->ops->get_tkip_seq) 251 drv_get_tkip_seq(sdata->local,
250 sdata->local->ops->get_tkip_seq( 252 key->conf.hw_key_idx,
251 local_to_hw(sdata->local), 253 &iv32, &iv16);
252 key->conf.hw_key_idx,
253 &iv32, &iv16);
254 254
255 seq[0] = iv16 & 0xff; 255 seq[0] = iv16 & 0xff;
256 seq[1] = (iv16 >> 8) & 0xff; 256 seq[1] = (iv16 >> 8) & 0xff;
@@ -451,18 +451,11 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
451 * This is a kludge. beacon interval should really be part 451 * This is a kludge. beacon interval should really be part
452 * of the beacon information. 452 * of the beacon information.
453 */ 453 */
454 if (params->interval && (sdata->local->hw.conf.beacon_int != 454 if (params->interval &&
455 params->interval)) { 455 (sdata->vif.bss_conf.beacon_int != params->interval)) {
456 sdata->local->hw.conf.beacon_int = params->interval; 456 sdata->vif.bss_conf.beacon_int = params->interval;
457 err = ieee80211_hw_config(sdata->local, 457 ieee80211_bss_info_change_notify(sdata,
458 IEEE80211_CONF_CHANGE_BEACON_INTERVAL); 458 BSS_CHANGED_BEACON_INT);
459 if (err < 0)
460 return err;
461 /*
462 * We updated some parameter so if below bails out
463 * it's not an error.
464 */
465 err = 0;
466 } 459 }
467 460
468 /* Need to have a beacon head if we don't have one yet */ 461 /* Need to have a beacon head if we don't have one yet */
@@ -528,8 +521,9 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
528 521
529 kfree(old); 522 kfree(old);
530 523
531 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON | 524 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
532 IEEE80211_IFCC_BEACON_ENABLED); 525 BSS_CHANGED_BEACON);
526 return 0;
533} 527}
534 528
535static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 529static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -580,7 +574,8 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
580 synchronize_rcu(); 574 synchronize_rcu();
581 kfree(old); 575 kfree(old);
582 576
583 return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON_ENABLED); 577 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
578 return 0;
584} 579}
585 580
586/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ 581/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
@@ -635,34 +630,38 @@ static void sta_apply_parameters(struct ieee80211_local *local,
635 int i, j; 630 int i, j;
636 struct ieee80211_supported_band *sband; 631 struct ieee80211_supported_band *sband;
637 struct ieee80211_sub_if_data *sdata = sta->sdata; 632 struct ieee80211_sub_if_data *sdata = sta->sdata;
633 u32 mask, set;
638 634
639 sband = local->hw.wiphy->bands[local->oper_channel->band]; 635 sband = local->hw.wiphy->bands[local->oper_channel->band];
640 636
641 /* 637 spin_lock_bh(&sta->lock);
642 * FIXME: updating the flags is racy when this function is 638 mask = params->sta_flags_mask;
643 * called from ieee80211_change_station(), this will 639 set = params->sta_flags_set;
644 * be resolved in a future patch.
645 */
646 640
647 if (params->station_flags & STATION_FLAG_CHANGED) { 641 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
648 spin_lock_bh(&sta->lock);
649 sta->flags &= ~WLAN_STA_AUTHORIZED; 642 sta->flags &= ~WLAN_STA_AUTHORIZED;
650 if (params->station_flags & STATION_FLAG_AUTHORIZED) 643 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
651 sta->flags |= WLAN_STA_AUTHORIZED; 644 sta->flags |= WLAN_STA_AUTHORIZED;
645 }
652 646
647 if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
653 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE; 648 sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
654 if (params->station_flags & STATION_FLAG_SHORT_PREAMBLE) 649 if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
655 sta->flags |= WLAN_STA_SHORT_PREAMBLE; 650 sta->flags |= WLAN_STA_SHORT_PREAMBLE;
651 }
656 652
653 if (mask & BIT(NL80211_STA_FLAG_WME)) {
657 sta->flags &= ~WLAN_STA_WME; 654 sta->flags &= ~WLAN_STA_WME;
658 if (params->station_flags & STATION_FLAG_WME) 655 if (set & BIT(NL80211_STA_FLAG_WME))
659 sta->flags |= WLAN_STA_WME; 656 sta->flags |= WLAN_STA_WME;
657 }
660 658
659 if (mask & BIT(NL80211_STA_FLAG_MFP)) {
661 sta->flags &= ~WLAN_STA_MFP; 660 sta->flags &= ~WLAN_STA_MFP;
662 if (params->station_flags & STATION_FLAG_MFP) 661 if (set & BIT(NL80211_STA_FLAG_MFP))
663 sta->flags |= WLAN_STA_MFP; 662 sta->flags |= WLAN_STA_MFP;
664 spin_unlock_bh(&sta->lock);
665 } 663 }
664 spin_unlock_bh(&sta->lock);
666 665
667 /* 666 /*
668 * FIXME: updating the following information is racy when this 667 * FIXME: updating the following information is racy when this
@@ -1120,7 +1119,7 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1120 p.cw_max = params->cwmax; 1119 p.cw_max = params->cwmax;
1121 p.cw_min = params->cwmin; 1120 p.cw_min = params->cwmin;
1122 p.txop = params->txop; 1121 p.txop = params->txop;
1123 if (local->ops->conf_tx(local_to_hw(local), params->queue, &p)) { 1122 if (drv_conf_tx(local, params->queue, &p)) {
1124 printk(KERN_DEBUG "%s: failed to set TX queue " 1123 printk(KERN_DEBUG "%s: failed to set TX queue "
1125 "parameters for queue %d\n", local->mdev->name, 1124 "parameters for queue %d\n", local->mdev->name,
1126 params->queue); 1125 params->queue);
@@ -1167,7 +1166,8 @@ static int ieee80211_scan(struct wiphy *wiphy,
1167 1166
1168 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1167 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1169 sdata->vif.type != NL80211_IFTYPE_ADHOC && 1168 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1170 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 1169 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
1170 (sdata->vif.type != NL80211_IFTYPE_AP || sdata->u.ap.beacon))
1171 return -EOPNOTSUPP; 1171 return -EOPNOTSUPP;
1172 1172
1173 return ieee80211_request_scan(sdata, req); 1173 return ieee80211_request_scan(sdata, req);
@@ -1258,6 +1258,19 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1258 if (ret) 1258 if (ret)
1259 return ret; 1259 return ret;
1260 1260
1261 if (req->use_mfp) {
1262 sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED;
1263 sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED;
1264 } else {
1265 sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED;
1266 sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED;
1267 }
1268
1269 if (req->control_port)
1270 sdata->u.mgd.flags |= IEEE80211_STA_CONTROL_PORT;
1271 else
1272 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
1273
1261 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; 1274 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1262 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE; 1275 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
1263 ieee80211_sta_req_auth(sdata); 1276 ieee80211_sta_req_auth(sdata);
@@ -1267,25 +1280,59 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1267static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, 1280static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1268 struct cfg80211_deauth_request *req) 1281 struct cfg80211_deauth_request *req)
1269{ 1282{
1270 struct ieee80211_sub_if_data *sdata; 1283 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1271
1272 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1273 1284
1274 /* TODO: req->ie */ 1285 /* TODO: req->ie, req->peer_addr */
1275 return ieee80211_sta_deauthenticate(sdata, req->reason_code); 1286 return ieee80211_sta_deauthenticate(sdata, req->reason_code);
1276} 1287}
1277 1288
1278static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, 1289static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1279 struct cfg80211_disassoc_request *req) 1290 struct cfg80211_disassoc_request *req)
1280{ 1291{
1281 struct ieee80211_sub_if_data *sdata; 1292 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1282
1283 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1284 1293
1285 /* TODO: req->ie */ 1294 /* TODO: req->ie, req->peer_addr */
1286 return ieee80211_sta_disassociate(sdata, req->reason_code); 1295 return ieee80211_sta_disassociate(sdata, req->reason_code);
1287} 1296}
1288 1297
1298static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1299 struct cfg80211_ibss_params *params)
1300{
1301 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1302
1303 return ieee80211_ibss_join(sdata, params);
1304}
1305
1306static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1307{
1308 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1309
1310 return ieee80211_ibss_leave(sdata);
1311}
1312
1313static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1314{
1315 struct ieee80211_local *local = wiphy_priv(wiphy);
1316 int err;
1317
1318 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1319 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1320
1321 if (err)
1322 return err;
1323 }
1324
1325 if (changed & WIPHY_PARAM_RETRY_SHORT)
1326 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
1327 if (changed & WIPHY_PARAM_RETRY_LONG)
1328 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
1329 if (changed &
1330 (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
1331 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
1332
1333 return 0;
1334}
1335
1289struct cfg80211_ops mac80211_config_ops = { 1336struct cfg80211_ops mac80211_config_ops = {
1290 .add_virtual_intf = ieee80211_add_iface, 1337 .add_virtual_intf = ieee80211_add_iface,
1291 .del_virtual_intf = ieee80211_del_iface, 1338 .del_virtual_intf = ieee80211_del_iface,
@@ -1322,4 +1369,7 @@ struct cfg80211_ops mac80211_config_ops = {
1322 .assoc = ieee80211_assoc, 1369 .assoc = ieee80211_assoc,
1323 .deauth = ieee80211_deauth, 1370 .deauth = ieee80211_deauth,
1324 .disassoc = ieee80211_disassoc, 1371 .disassoc = ieee80211_disassoc,
1372 .join_ibss = ieee80211_join_ibss,
1373 .leave_ibss = ieee80211_leave_ibss,
1374 .set_wiphy_params = ieee80211_set_wiphy_params,
1325}; 1375};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 210b9b6fecd2..11c72311f35b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -10,6 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/rtnetlink.h> 11#include <linux/rtnetlink.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "driver-ops.h"
13#include "rate.h" 14#include "rate.h"
14#include "debugfs.h" 15#include "debugfs.h"
15 16
@@ -51,14 +52,6 @@ static const struct file_operations name## _ops = { \
51 52
52DEBUGFS_READONLY_FILE(frequency, 20, "%d", 53DEBUGFS_READONLY_FILE(frequency, 20, "%d",
53 local->hw.conf.channel->center_freq); 54 local->hw.conf.channel->center_freq);
54DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
55 local->rts_threshold);
56DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
57 local->fragmentation_threshold);
58DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
59 local->hw.conf.short_frame_max_tx_count);
60DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
61 local->hw.conf.long_frame_max_tx_count);
62DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 55DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
63 local->total_ps_buffered); 56 local->total_ps_buffered);
64DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", 57DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
@@ -70,11 +63,10 @@ static ssize_t tsf_read(struct file *file, char __user *user_buf,
70 size_t count, loff_t *ppos) 63 size_t count, loff_t *ppos)
71{ 64{
72 struct ieee80211_local *local = file->private_data; 65 struct ieee80211_local *local = file->private_data;
73 u64 tsf = 0; 66 u64 tsf;
74 char buf[100]; 67 char buf[100];
75 68
76 if (local->ops->get_tsf) 69 tsf = drv_get_tsf(local);
77 tsf = local->ops->get_tsf(local_to_hw(local));
78 70
79 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf); 71 snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf);
80 72
@@ -97,13 +89,13 @@ static ssize_t tsf_write(struct file *file,
97 89
98 if (strncmp(buf, "reset", 5) == 0) { 90 if (strncmp(buf, "reset", 5) == 0) {
99 if (local->ops->reset_tsf) { 91 if (local->ops->reset_tsf) {
100 local->ops->reset_tsf(local_to_hw(local)); 92 drv_reset_tsf(local);
101 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy)); 93 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy));
102 } 94 }
103 } else { 95 } else {
104 tsf = simple_strtoul(buf, NULL, 0); 96 tsf = simple_strtoul(buf, NULL, 0);
105 if (local->ops->set_tsf) { 97 if (local->ops->set_tsf) {
106 local->ops->set_tsf(local_to_hw(local), tsf); 98 drv_set_tsf(local, tsf);
107 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf); 99 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf);
108 } 100 }
109 } 101 }
@@ -135,6 +127,42 @@ static const struct file_operations reset_ops = {
135 .open = mac80211_open_file_generic, 127 .open = mac80211_open_file_generic,
136}; 128};
137 129
130static ssize_t noack_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
132{
133 struct ieee80211_local *local = file->private_data;
134 int res;
135 char buf[10];
136
137 res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test);
138
139 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
140}
141
142static ssize_t noack_write(struct file *file,
143 const char __user *user_buf,
144 size_t count, loff_t *ppos)
145{
146 struct ieee80211_local *local = file->private_data;
147 char buf[10];
148 size_t len;
149
150 len = min(count, sizeof(buf) - 1);
151 if (copy_from_user(buf, user_buf, len))
152 return -EFAULT;
153 buf[len] = '\0';
154
155 local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
156
157 return count;
158}
159
160static const struct file_operations noack_ops = {
161 .read = noack_read,
162 .write = noack_write,
163 .open = mac80211_open_file_generic
164};
165
138/* statistics stuff */ 166/* statistics stuff */
139 167
140#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 168#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -150,14 +178,12 @@ static ssize_t format_devstat_counter(struct ieee80211_local *local,
150 char buf[20]; 178 char buf[20];
151 int res; 179 int res;
152 180
153 if (!local->ops->get_stats)
154 return -EOPNOTSUPP;
155
156 rtnl_lock(); 181 rtnl_lock();
157 res = local->ops->get_stats(local_to_hw(local), &stats); 182 res = drv_get_stats(local, &stats);
158 rtnl_unlock(); 183 rtnl_unlock();
159 if (!res) 184 if (res)
160 res = printvalue(&stats, buf, sizeof(buf)); 185 return res;
186 res = printvalue(&stats, buf, sizeof(buf));
161 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 187 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
162} 188}
163 189
@@ -269,14 +295,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
269 local->debugfs.keys = debugfs_create_dir("keys", phyd); 295 local->debugfs.keys = debugfs_create_dir("keys", phyd);
270 296
271 DEBUGFS_ADD(frequency); 297 DEBUGFS_ADD(frequency);
272 DEBUGFS_ADD(rts_threshold);
273 DEBUGFS_ADD(fragmentation_threshold);
274 DEBUGFS_ADD(short_retry_limit);
275 DEBUGFS_ADD(long_retry_limit);
276 DEBUGFS_ADD(total_ps_buffered); 298 DEBUGFS_ADD(total_ps_buffered);
277 DEBUGFS_ADD(wep_iv); 299 DEBUGFS_ADD(wep_iv);
278 DEBUGFS_ADD(tsf); 300 DEBUGFS_ADD(tsf);
279 DEBUGFS_ADD_MODE(reset, 0200); 301 DEBUGFS_ADD_MODE(reset, 0200);
302 DEBUGFS_ADD(noack);
280 303
281 statsd = debugfs_create_dir("statistics", phyd); 304 statsd = debugfs_create_dir("statistics", phyd);
282 local->debugfs.statistics = statsd; 305 local->debugfs.statistics = statsd;
@@ -324,14 +347,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
324void debugfs_hw_del(struct ieee80211_local *local) 347void debugfs_hw_del(struct ieee80211_local *local)
325{ 348{
326 DEBUGFS_DEL(frequency); 349 DEBUGFS_DEL(frequency);
327 DEBUGFS_DEL(rts_threshold);
328 DEBUGFS_DEL(fragmentation_threshold);
329 DEBUGFS_DEL(short_retry_limit);
330 DEBUGFS_DEL(long_retry_limit);
331 DEBUGFS_DEL(total_ps_buffered); 350 DEBUGFS_DEL(total_ps_buffered);
332 DEBUGFS_DEL(wep_iv); 351 DEBUGFS_DEL(wep_iv);
333 DEBUGFS_DEL(tsf); 352 DEBUGFS_DEL(tsf);
334 DEBUGFS_DEL(reset); 353 DEBUGFS_DEL(reset);
354 DEBUGFS_DEL(noack);
335 355
336 DEBUGFS_STATS_DEL(transmitted_fragment_count); 356 DEBUGFS_STATS_DEL(transmitted_fragment_count);
337 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 357 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
new file mode 100644
index 000000000000..3912b5334b9c
--- /dev/null
+++ b/net/mac80211/driver-ops.h
@@ -0,0 +1,184 @@
1#ifndef __MAC80211_DRIVER_OPS
2#define __MAC80211_DRIVER_OPS
3
4#include <net/mac80211.h>
5#include "ieee80211_i.h"
6
7static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
8{
9 return local->ops->tx(&local->hw, skb);
10}
11
12static inline int drv_start(struct ieee80211_local *local)
13{
14 return local->ops->start(&local->hw);
15}
16
17static inline void drv_stop(struct ieee80211_local *local)
18{
19 local->ops->stop(&local->hw);
20}
21
22static inline int drv_add_interface(struct ieee80211_local *local,
23 struct ieee80211_if_init_conf *conf)
24{
25 return local->ops->add_interface(&local->hw, conf);
26}
27
28static inline void drv_remove_interface(struct ieee80211_local *local,
29 struct ieee80211_if_init_conf *conf)
30{
31 local->ops->remove_interface(&local->hw, conf);
32}
33
34static inline int drv_config(struct ieee80211_local *local, u32 changed)
35{
36 return local->ops->config(&local->hw, changed);
37}
38
39static inline void drv_bss_info_changed(struct ieee80211_local *local,
40 struct ieee80211_vif *vif,
41 struct ieee80211_bss_conf *info,
42 u32 changed)
43{
44 if (local->ops->bss_info_changed)
45 local->ops->bss_info_changed(&local->hw, vif, info, changed);
46}
47
48static inline void drv_configure_filter(struct ieee80211_local *local,
49 unsigned int changed_flags,
50 unsigned int *total_flags,
51 int mc_count,
52 struct dev_addr_list *mc_list)
53{
54 local->ops->configure_filter(&local->hw, changed_flags, total_flags,
55 mc_count, mc_list);
56}
57
58static inline int drv_set_tim(struct ieee80211_local *local,
59 struct ieee80211_sta *sta, bool set)
60{
61 if (local->ops->set_tim)
62 return local->ops->set_tim(&local->hw, sta, set);
63 return 0;
64}
65
66static inline int drv_set_key(struct ieee80211_local *local,
67 enum set_key_cmd cmd, struct ieee80211_vif *vif,
68 struct ieee80211_sta *sta,
69 struct ieee80211_key_conf *key)
70{
71 return local->ops->set_key(&local->hw, cmd, vif, sta, key);
72}
73
74static inline void drv_update_tkip_key(struct ieee80211_local *local,
75 struct ieee80211_key_conf *conf,
76 const u8 *address, u32 iv32,
77 u16 *phase1key)
78{
79 if (local->ops->update_tkip_key)
80 local->ops->update_tkip_key(&local->hw, conf, address,
81 iv32, phase1key);
82}
83
84static inline int drv_hw_scan(struct ieee80211_local *local,
85 struct cfg80211_scan_request *req)
86{
87 return local->ops->hw_scan(&local->hw, req);
88}
89
90static inline void drv_sw_scan_start(struct ieee80211_local *local)
91{
92 if (local->ops->sw_scan_start)
93 local->ops->sw_scan_start(&local->hw);
94}
95
96static inline void drv_sw_scan_complete(struct ieee80211_local *local)
97{
98 if (local->ops->sw_scan_complete)
99 local->ops->sw_scan_complete(&local->hw);
100}
101
102static inline int drv_get_stats(struct ieee80211_local *local,
103 struct ieee80211_low_level_stats *stats)
104{
105 if (!local->ops->get_stats)
106 return -EOPNOTSUPP;
107 return local->ops->get_stats(&local->hw, stats);
108}
109
110static inline void drv_get_tkip_seq(struct ieee80211_local *local,
111 u8 hw_key_idx, u32 *iv32, u16 *iv16)
112{
113 if (local->ops->get_tkip_seq)
114 local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
115}
116
117static inline int drv_set_rts_threshold(struct ieee80211_local *local,
118 u32 value)
119{
120 if (local->ops->set_rts_threshold)
121 return local->ops->set_rts_threshold(&local->hw, value);
122 return 0;
123}
124
125static inline void drv_sta_notify(struct ieee80211_local *local,
126 struct ieee80211_vif *vif,
127 enum sta_notify_cmd cmd,
128 struct ieee80211_sta *sta)
129{
130 if (local->ops->sta_notify)
131 local->ops->sta_notify(&local->hw, vif, cmd, sta);
132}
133
134static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
135 const struct ieee80211_tx_queue_params *params)
136{
137 if (local->ops->conf_tx)
138 return local->ops->conf_tx(&local->hw, queue, params);
139 return -EOPNOTSUPP;
140}
141
142static inline int drv_get_tx_stats(struct ieee80211_local *local,
143 struct ieee80211_tx_queue_stats *stats)
144{
145 return local->ops->get_tx_stats(&local->hw, stats);
146}
147
148static inline u64 drv_get_tsf(struct ieee80211_local *local)
149{
150 if (local->ops->get_tsf)
151 return local->ops->get_tsf(&local->hw);
152 return -1ULL;
153}
154
155static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
156{
157 if (local->ops->set_tsf)
158 local->ops->set_tsf(&local->hw, tsf);
159}
160
161static inline void drv_reset_tsf(struct ieee80211_local *local)
162{
163 if (local->ops->reset_tsf)
164 local->ops->reset_tsf(&local->hw);
165}
166
167static inline int drv_tx_last_beacon(struct ieee80211_local *local)
168{
169 if (local->ops->tx_last_beacon)
170 return local->ops->tx_last_beacon(&local->hw);
171 return 1;
172}
173
174static inline int drv_ampdu_action(struct ieee80211_local *local,
175 enum ieee80211_ampdu_mlme_action action,
176 struct ieee80211_sta *sta, u16 tid,
177 u16 *ssn)
178{
179 if (local->ops->ampdu_action)
180 return local->ops->ampdu_action(&local->hw, action,
181 sta, tid, ssn);
182 return -EOPNOTSUPP;
183}
184#endif /* __MAC80211_DRIVER_OPS */
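
With these wrappers, the rest of mac80211 no longer dereferences local->ops directly; the "is this optional callback implemented?" checks and the fallback results live in one place. Driver authors are unaffected and can still leave optional members of struct ieee80211_ops unset, as in the hypothetical sketch below (the foo_* handlers are placeholders with their bodies omitted; the signatures are inferred from the wrappers above).

	#include <net/mac80211.h>

	static int foo_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
	static int foo_start(struct ieee80211_hw *hw);
	static void foo_stop(struct ieee80211_hw *hw);
	static int foo_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_if_init_conf *conf);
	static void foo_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_if_init_conf *conf);
	static int foo_config(struct ieee80211_hw *hw, u32 changed);
	static void foo_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *total_flags,
					 int mc_count,
					 struct dev_addr_list *mc_list);

	/* Only the mandatory callbacks are wired up; drv_conf_tx(), drv_get_tsf()
	 * and drv_ampdu_action() then fall back to the defaults defined above
	 * (-EOPNOTSUPP, -1ULL and -EOPNOTSUPP respectively). */
	static const struct ieee80211_ops foo_ops = {
		.tx			= foo_tx,
		.start			= foo_start,
		.stop			= foo_stop,
		.add_interface		= foo_add_interface,
		.remove_interface	= foo_remove_interface,
		.config			= foo_config,
		.configure_filter	= foo_configure_filter,
		/* .conf_tx, .get_tsf, .set_tsf, .ampdu_action left unset */
	};
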
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 0d95561c0ee0..f288d01a6344 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -12,12 +12,12 @@
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13 13
14/* 14/*
15 * indicate a failed Michael MIC to userspace; the passed packet 15 * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of
16 * (in the variable hdr) must be long enough to extract the TKIP 16 * the frame that generated the MIC failure (i.e., if it was provided by the
17 * fields like TSC 17 * driver or is still in the frame), it should provide that information.
18 */ 18 */
19void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, 19void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
20 struct ieee80211_hdr *hdr) 20 struct ieee80211_hdr *hdr, const u8 *tsc)
21{ 21{
22 union iwreq_data wrqu; 22 union iwreq_data wrqu;
23 char *buf = kmalloc(128, GFP_ATOMIC); 23 char *buf = kmalloc(128, GFP_ATOMIC);
@@ -34,8 +34,9 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
34 kfree(buf); 34 kfree(buf);
35 } 35 }
36 36
37 /* 37 cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
38 * TODO: re-add support for sending MIC failure indication 38 (hdr->addr1[0] & 0x01) ?
39 * with all info via nl80211 39 NL80211_KEYTYPE_GROUP :
40 */ 40 NL80211_KEYTYPE_PAIRWISE,
41 keyidx, tsc);
41} 42}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 4e3c72f20de7..0891bfb06996 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <net/wireless.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
19#include "ieee80211_i.h" 18#include "ieee80211_i.h"
20#include "rate.h" 19#include "rate.h"
@@ -83,89 +82,6 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
83 ht_cap->mcs.rx_mask[32/8] |= 1; 82 ht_cap->mcs.rx_mask[32/8] |= 1;
84} 83}
85 84
86/*
87 * ieee80211_enable_ht should be called only after the operating band
88 * has been determined as ht configuration depends on the hw's
89 * HT abilities for a specific band.
90 */
91u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
92 struct ieee80211_ht_info *hti,
93 u16 ap_ht_cap_flags)
94{
95 struct ieee80211_local *local = sdata->local;
96 struct ieee80211_supported_band *sband;
97 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
98 struct ieee80211_bss_ht_conf ht;
99 struct sta_info *sta;
100 u32 changed = 0;
101 bool enable_ht = true, ht_changed;
102 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
103
104 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
105
106 memset(&ht, 0, sizeof(ht));
107
108 /* HT is not supported */
109 if (!sband->ht_cap.ht_supported)
110 enable_ht = false;
111
112 /* check that channel matches the right operating channel */
113 if (local->hw.conf.channel->center_freq !=
114 ieee80211_channel_to_frequency(hti->control_chan))
115 enable_ht = false;
116
117 if (enable_ht) {
118 channel_type = NL80211_CHAN_HT20;
119
120 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
121 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
122 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
123 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
124 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
125 channel_type = NL80211_CHAN_HT40PLUS;
126 break;
127 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
128 channel_type = NL80211_CHAN_HT40MINUS;
129 break;
130 }
131 }
132 }
133
134 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
135 channel_type != local->hw.conf.channel_type;
136
137 local->oper_channel_type = channel_type;
138
139 if (ht_changed) {
140 /* channel_type change automatically detected */
141 ieee80211_hw_config(local, 0);
142
143 rcu_read_lock();
144
145 sta = sta_info_get(local, ifmgd->bssid);
146 if (sta)
147 rate_control_rate_update(local, sband, sta,
148 IEEE80211_RC_HT_CHANGED);
149
150 rcu_read_unlock();
151
152 }
153
154 /* disable HT */
155 if (!enable_ht)
156 return 0;
157
158 ht.operation_mode = le16_to_cpu(hti->operation_mode);
159
160 /* if bss configuration changed store the new one */
161 if (memcmp(&sdata->vif.bss_conf.ht, &ht, sizeof(ht))) {
162 changed |= BSS_CHANGED_HT;
163 sdata->vif.bss_conf.ht = ht;
164 }
165
166 return changed;
167}
168
169void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) 85void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
170{ 86{
171 int i; 87 int i;
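
ieee80211_enable_ht() disappears from ht.c here; the association-time HT decision appears to move into the reworked mlme.c elsewhere in this series. The rule it implemented is worth keeping in view: HT40 is chosen only when the AP is not 40 MHz intolerant, the local band supports 20/40 operation, the HT information element permits any channel width, and the secondary-channel offset names a channel above or below the control channel; otherwise HT20 (or no HT at all) is used. A standalone sketch of that decision follows, with illustrative stand-ins for the ieee80211/nl80211 constants.

    #include <stdio.h>

    /* Illustrative stand-ins for the constants used by the removed code. */
    enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40PLUS, CHAN_HT40MINUS };

    #define HT_PARAM_CHA_SEC_OFFSET   0x03
    #define HT_PARAM_CHA_SEC_ABOVE    0x01
    #define HT_PARAM_CHA_SEC_BELOW    0x03
    #define HT_PARAM_CHAN_WIDTH_ANY   0x04
    #define HT_CAP_SUP_WIDTH_20_40    0x0002
    #define HT_CAP_40MHZ_INTOLERANT   0x4000

    static enum chan_type pick_channel_type(unsigned ht_param,
                                            unsigned local_cap,
                                            unsigned ap_cap)
    {
        enum chan_type type = CHAN_HT20;

        if (!(ap_cap & HT_CAP_40MHZ_INTOLERANT) &&
            (local_cap & HT_CAP_SUP_WIDTH_20_40) &&
            (ht_param & HT_PARAM_CHAN_WIDTH_ANY)) {
            switch (ht_param & HT_PARAM_CHA_SEC_OFFSET) {
            case HT_PARAM_CHA_SEC_ABOVE:
                type = CHAN_HT40PLUS;
                break;
            case HT_PARAM_CHA_SEC_BELOW:
                type = CHAN_HT40MINUS;
                break;
            }
        }
        return type;
    }

    int main(void)
    {
        /* secondary channel above, 40 MHz tolerated and supported: HT40+ (2) */
        printf("%d\n", pick_channel_type(HT_PARAM_CHA_SEC_ABOVE |
                                         HT_PARAM_CHAN_WIDTH_ANY,
                                         HT_CAP_SUP_WIDTH_20_40, 0));
        return 0;
    }

When any of the gates fails, the removed code fell back to plain HT20, and to no HT at all if the band lacks HT support or the operating channel does not match the control channel.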
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3201e1f96365..0b30277eb366 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -22,6 +22,7 @@
22#include <asm/unaligned.h> 22#include <asm/unaligned.h>
23 23
24#include "ieee80211_i.h" 24#include "ieee80211_i.h"
25#include "driver-ops.h"
25#include "rate.h" 26#include "rate.h"
26 27
27#define IEEE80211_SCAN_INTERVAL (2 * HZ) 28#define IEEE80211_SCAN_INTERVAL (2 * HZ)
@@ -59,74 +60,65 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
59 sdata->u.ibss.bssid, 0); 60 sdata->u.ibss.bssid, 0);
60} 61}
61 62
62static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 63static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
63 const u8 *bssid, const int beacon_int, 64 const u8 *bssid, const int beacon_int,
64 const int freq, 65 struct ieee80211_channel *chan,
65 const size_t supp_rates_len, 66 const u32 basic_rates,
66 const u8 *supp_rates, 67 const u16 capability, u64 tsf)
67 const u16 capability, u64 tsf)
68{ 68{
69 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 69 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
70 struct ieee80211_local *local = sdata->local; 70 struct ieee80211_local *local = sdata->local;
71 int res = 0, rates, i, j; 71 int rates, i;
72 struct sk_buff *skb; 72 struct sk_buff *skb;
73 struct ieee80211_mgmt *mgmt; 73 struct ieee80211_mgmt *mgmt;
74 u8 *pos; 74 u8 *pos;
75 struct ieee80211_supported_band *sband; 75 struct ieee80211_supported_band *sband;
76 union iwreq_data wrqu; 76 u32 bss_change;
77 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
77 78
78 if (local->ops->reset_tsf) { 79 /* Reset own TSF to allow time synchronization work. */
79 /* Reset own TSF to allow time synchronization work. */ 80 drv_reset_tsf(local);
80 local->ops->reset_tsf(local_to_hw(local));
81 }
82 81
83 if ((ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET) && 82 skb = ifibss->skb;
84 memcmp(ifibss->bssid, bssid, ETH_ALEN) == 0) 83 rcu_assign_pointer(ifibss->presp, NULL);
85 return res; 84 synchronize_rcu();
85 skb->data = skb->head;
86 skb->len = 0;
87 skb_reset_tail_pointer(skb);
88 skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
86 89
87 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 90 if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
88 if (!skb) { 91 sta_info_flush(sdata->local, sdata);
89 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
90 "response\n", sdata->dev->name);
91 return -ENOMEM;
92 }
93
94 if (!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET)) {
95 /* Remove possible STA entries from other IBSS networks. */
96 sta_info_flush_delayed(sdata);
97 }
98 92
99 memcpy(ifibss->bssid, bssid, ETH_ALEN); 93 memcpy(ifibss->bssid, bssid, ETH_ALEN);
100 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
101 if (res)
102 return res;
103
104 local->hw.conf.beacon_int = beacon_int >= 10 ? beacon_int : 10;
105 94
106 sdata->drop_unencrypted = capability & 95 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
107 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
108 96
109 res = ieee80211_set_freq(sdata, freq); 97 local->oper_channel = chan;
98 local->oper_channel_type = NL80211_CHAN_NO_HT;
99 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
110 100
111 if (res) 101 sband = local->hw.wiphy->bands[chan->band];
112 return res;
113 102
114 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 103 /* build supported rates array */
104 pos = supp_rates;
105 for (i = 0; i < sband->n_bitrates; i++) {
106 int rate = sband->bitrates[i].bitrate;
107 u8 basic = 0;
108 if (basic_rates & BIT(i))
109 basic = 0x80;
110 *pos++ = basic | (u8) (rate / 5);
111 }
115 112
116 /* Build IBSS probe response */ 113 /* Build IBSS probe response */
117 114 mgmt = (void *) skb_put(skb, 24 + sizeof(mgmt->u.beacon));
118 skb_reserve(skb, local->hw.extra_tx_headroom);
119
120 mgmt = (struct ieee80211_mgmt *)
121 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
122 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 115 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
123 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 116 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
124 IEEE80211_STYPE_PROBE_RESP); 117 IEEE80211_STYPE_PROBE_RESP);
125 memset(mgmt->da, 0xff, ETH_ALEN); 118 memset(mgmt->da, 0xff, ETH_ALEN);
126 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 119 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
127 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 120 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
128 mgmt->u.beacon.beacon_int = 121 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
129 cpu_to_le16(local->hw.conf.beacon_int);
130 mgmt->u.beacon.timestamp = cpu_to_le64(tsf); 122 mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
131 mgmt->u.beacon.capab_info = cpu_to_le16(capability); 123 mgmt->u.beacon.capab_info = cpu_to_le16(capability);
132 124
@@ -135,7 +127,7 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
135 *pos++ = ifibss->ssid_len; 127 *pos++ = ifibss->ssid_len;
136 memcpy(pos, ifibss->ssid, ifibss->ssid_len); 128 memcpy(pos, ifibss->ssid, ifibss->ssid_len);
137 129
138 rates = supp_rates_len; 130 rates = sband->n_bitrates;
139 if (rates > 8) 131 if (rates > 8)
140 rates = 8; 132 rates = 8;
141 pos = skb_put(skb, 2 + rates); 133 pos = skb_put(skb, 2 + rates);
@@ -147,7 +139,7 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
147 pos = skb_put(skb, 2 + 1); 139 pos = skb_put(skb, 2 + 1);
148 *pos++ = WLAN_EID_DS_PARAMS; 140 *pos++ = WLAN_EID_DS_PARAMS;
149 *pos++ = 1; 141 *pos++ = 1;
150 *pos++ = ieee80211_frequency_to_channel(freq); 142 *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
151 } 143 }
152 144
153 pos = skb_put(skb, 2 + 2); 145 pos = skb_put(skb, 2 + 2);
@@ -157,51 +149,73 @@ static int __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
157 *pos++ = 0; 149 *pos++ = 0;
158 *pos++ = 0; 150 *pos++ = 0;
159 151
160 if (supp_rates_len > 8) { 152 if (sband->n_bitrates > 8) {
161 rates = supp_rates_len - 8; 153 rates = sband->n_bitrates - 8;
162 pos = skb_put(skb, 2 + rates); 154 pos = skb_put(skb, 2 + rates);
163 *pos++ = WLAN_EID_EXT_SUPP_RATES; 155 *pos++ = WLAN_EID_EXT_SUPP_RATES;
164 *pos++ = rates; 156 *pos++ = rates;
165 memcpy(pos, &supp_rates[8], rates); 157 memcpy(pos, &supp_rates[8], rates);
166 } 158 }
167 159
168 ifibss->probe_resp = skb; 160 if (ifibss->ie_len)
161 memcpy(skb_put(skb, ifibss->ie_len),
162 ifibss->ie, ifibss->ie_len);
169 163
170 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON | 164 rcu_assign_pointer(ifibss->presp, skb);
171 IEEE80211_IFCC_BEACON_ENABLED);
172 165
166 sdata->vif.bss_conf.beacon_int = beacon_int;
167 bss_change = BSS_CHANGED_BEACON_INT;
168 bss_change |= ieee80211_reset_erp_info(sdata);
169 bss_change |= BSS_CHANGED_BSSID;
170 bss_change |= BSS_CHANGED_BEACON;
171 bss_change |= BSS_CHANGED_BEACON_ENABLED;
172 ieee80211_bss_info_change_notify(sdata, bss_change);
173 173
174 rates = 0; 174 ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
175 for (i = 0; i < supp_rates_len; i++) {
176 int bitrate = (supp_rates[i] & 0x7f) * 5;
177 for (j = 0; j < sband->n_bitrates; j++)
178 if (sband->bitrates[j].bitrate == bitrate)
179 rates |= BIT(j);
180 }
181 175
182 ieee80211_sta_def_wmm_params(sdata, supp_rates_len, supp_rates);
183
184 ifibss->flags |= IEEE80211_IBSS_PREV_BSSID_SET;
185 ifibss->state = IEEE80211_IBSS_MLME_JOINED; 176 ifibss->state = IEEE80211_IBSS_MLME_JOINED;
186 mod_timer(&ifibss->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 177 mod_timer(&ifibss->timer,
187 178 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
188 memset(&wrqu, 0, sizeof(wrqu));
189 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
190 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
191 179
192 return res; 180 cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
181 mgmt, skb->len, 0, GFP_KERNEL);
182 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
193} 183}
194 184
195static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 185static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
196 struct ieee80211_bss *bss) 186 struct ieee80211_bss *bss)
197{ 187{
198 return __ieee80211_sta_join_ibss(sdata, 188 struct ieee80211_supported_band *sband;
199 bss->cbss.bssid, 189 u32 basic_rates;
200 bss->cbss.beacon_interval, 190 int i, j;
201 bss->cbss.channel->center_freq, 191 u16 beacon_int = bss->cbss.beacon_interval;
202 bss->supp_rates_len, bss->supp_rates, 192
203 bss->cbss.capability, 193 if (beacon_int < 10)
204 bss->cbss.tsf); 194 beacon_int = 10;
195
196 sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band];
197
198 basic_rates = 0;
199
200 for (i = 0; i < bss->supp_rates_len; i++) {
201 int rate = (bss->supp_rates[i] & 0x7f) * 5;
202 bool is_basic = !!(bss->supp_rates[i] & 0x80);
203
204 for (j = 0; j < sband->n_bitrates; j++) {
205 if (sband->bitrates[j].bitrate == rate) {
206 if (is_basic)
207 basic_rates |= BIT(j);
208 break;
209 }
210 }
211 }
212
213 __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid,
214 beacon_int,
215 bss->cbss.channel,
216 basic_rates,
217 bss->cbss.capability,
218 bss->cbss.tsf);
205} 219}
206 220
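
The split above changes what the join path carries around: ieee80211_sta_join_ibss() now converts the peer's supported-rates IE (rates coded in units of 500 kb/s, with 0x80 flagging a basic rate) into a basic-rates bitmap indexed by the band's bitrate table, and __ieee80211_sta_join_ibss() regenerates the IE from the band table when it builds the probe-response/beacon template. A self-contained sketch of the two conversions, using a hard-coded 2.4 GHz-style bitrate table in the 100 kb/s units mac80211 stores internally:

    #include <stdio.h>

    /* Band bitrate table in units of 100 kb/s (10 == 1 Mb/s), as in
     * struct ieee80211_supported_band; the values here are illustrative. */
    static const int band_bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };
    #define N_BITRATES ((int)(sizeof(band_bitrates) / sizeof(band_bitrates[0])))

    /* Supported-rates IE octets -> basic-rates bitmap over band_bitrates[]. */
    static unsigned ie_to_basic_bitmap(const unsigned char *ie, int len)
    {
        unsigned bitmap = 0;
        int i, j;

        for (i = 0; i < len; i++) {
            int rate = (ie[i] & 0x7f) * 5;      /* back to 100 kb/s units */
            int basic = ie[i] & 0x80;

            for (j = 0; j < N_BITRATES; j++) {
                if (band_bitrates[j] == rate) {
                    if (basic)
                        bitmap |= 1u << j;
                    break;
                }
            }
        }
        return bitmap;
    }

    /* Basic-rates bitmap -> supported-rates IE octets for the template. */
    static int bitmap_to_ie(unsigned basic_bitmap, unsigned char *out)
    {
        int i;

        for (i = 0; i < N_BITRATES; i++)
            out[i] = ((basic_bitmap & (1u << i)) ? 0x80 : 0) |
                     (unsigned char)(band_bitrates[i] / 5);
        return N_BITRATES;
    }

    int main(void)
    {
        unsigned char ie[N_BITRATES];
        int n = bitmap_to_ie(0x3, ie);          /* first two rates are basic */

        /* round-trips back to 0x3 */
        printf("bitmap 0x%x from %d IE octets\n", ie_to_basic_bitmap(ie, n), n);
        return 0;
    }

This is also why ieee80211_sta_create_ibss() further down can simply pass 3 as the basic-rates argument: BIT(0) | BIT(1) marks the first two rates of the band table as basic, per the comment there.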
207static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 221static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -277,7 +291,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
277 goto put_bss; 291 goto put_bss;
278 292
279 /* we use a fixed BSSID */ 293 /* we use a fixed BSSID */
 280 if (sdata->u.ibss.flags & IEEE80211_IBSS_BSSID_SET) 294 if (sdata->u.ibss.fixed_bssid)
281 goto put_bss; 295 goto put_bss;
282 296
283 /* not an IBSS */ 297 /* not an IBSS */
@@ -322,12 +336,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
322 bitrates[rx_status->rate_idx].bitrate; 336 bitrates[rx_status->rate_idx].bitrate;
323 337
324 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); 338 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
325 } else if (local && local->ops && local->ops->get_tsf) 339 } else {
326 /* second best option: get current TSF */ 340 /*
327 rx_timestamp = local->ops->get_tsf(local_to_hw(local)); 341 * second best option: get current TSF
328 else 342 * (will return -1 if not supported)
329 /* can't merge without knowing the TSF */ 343 */
330 rx_timestamp = -1LLU; 344 rx_timestamp = drv_get_tsf(local);
345 }
331 346
332#ifdef CONFIG_MAC80211_IBSS_DEBUG 347#ifdef CONFIG_MAC80211_IBSS_DEBUG
333 printk(KERN_DEBUG "RX beacon SA=%pM BSSID=" 348 printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
@@ -369,13 +384,14 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
369 struct sta_info *sta; 384 struct sta_info *sta;
370 int band = local->hw.conf.channel->band; 385 int band = local->hw.conf.channel->band;
371 386
372 /* TODO: Could consider removing the least recently used entry and 387 /*
373 * allow new one to be added. */ 388 * XXX: Consider removing the least recently used entry and
389 * allow new one to be added.
390 */
374 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 391 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
375 if (net_ratelimit()) { 392 if (net_ratelimit())
376 printk(KERN_DEBUG "%s: No room for a new IBSS STA " 393 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
377 "entry %pM\n", sdata->dev->name, addr); 394 sdata->dev->name, addr);
378 }
379 return NULL; 395 return NULL;
380 } 396 }
381 397
@@ -432,41 +448,33 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
432{ 448{
433 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 449 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
434 450
435 mod_timer(&ifibss->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 451 mod_timer(&ifibss->timer,
452 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
436 453
437 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); 454 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
455
438 if (ieee80211_sta_active_ibss(sdata)) 456 if (ieee80211_sta_active_ibss(sdata))
439 return; 457 return;
440 458
441 if ((ifibss->flags & IEEE80211_IBSS_BSSID_SET) && 459 if (ifibss->fixed_channel)
442 (!(ifibss->flags & IEEE80211_IBSS_AUTO_CHANNEL_SEL)))
443 return; 460 return;
444 461
445 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 462 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
446 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 463 "IBSS networks with same SSID (merge)\n", sdata->dev->name);
447 464
448 /* XXX maybe racy? */ 465 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
449 if (sdata->local->scan_req)
450 return;
451
452 memcpy(sdata->local->int_scan_req.ssids[0].ssid,
453 ifibss->ssid, IEEE80211_MAX_SSID_LEN);
454 sdata->local->int_scan_req.ssids[0].ssid_len = ifibss->ssid_len;
455 ieee80211_request_scan(sdata, &sdata->local->int_scan_req);
456} 466}
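
The merge timer here, and the other IBSS timers touched in this hunk and below, move from a bare jiffies deadline to round_jiffies(). The aim is power: timers that only need about one-second accuracy are rounded onto whole-second boundaries so they expire together rather than waking the CPU at scattered points. A tiny sketch of the rounding idea only; the HZ value and helper are illustrative and deliberately simpler than the kernel's round_jiffies(), which also takes care not to round into the past.

    #include <stdio.h>

    #define HZ 250          /* illustrative tick rate */

    /* Round an absolute tick count to the nearest whole second. */
    static unsigned long round_to_second(unsigned long j)
    {
        unsigned long rem = j % HZ;

        return rem < HZ / 2 ? j - rem : j + (HZ - rem);
    }

    int main(void)
    {
        /* two timers due at scattered ticks coalesce onto 1000 and 1250 */
        printf("%lu %lu\n", round_to_second(1030), round_to_second(1210));
        return 0;
    }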
457 467
458static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 468static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
459{ 469{
460 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 470 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
461 struct ieee80211_local *local = sdata->local; 471 struct ieee80211_local *local = sdata->local;
462 struct ieee80211_supported_band *sband; 472 struct ieee80211_supported_band *sband;
463 u8 *pos;
464 u8 bssid[ETH_ALEN]; 473 u8 bssid[ETH_ALEN];
465 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
466 u16 capability; 474 u16 capability;
467 int i; 475 int i;
468 476
469 if (ifibss->flags & IEEE80211_IBSS_BSSID_SET) { 477 if (ifibss->fixed_bssid) {
470 memcpy(bssid, ifibss->bssid, ETH_ALEN); 478 memcpy(bssid, ifibss->bssid, ETH_ALEN);
471 } else { 479 } else {
472 /* Generate random, not broadcast, locally administered BSSID. Mix in 480 /* Generate random, not broadcast, locally administered BSSID. Mix in
@@ -482,10 +490,7 @@ static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
482 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 490 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
483 sdata->dev->name, bssid); 491 sdata->dev->name, bssid);
484 492
485 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 493 sband = local->hw.wiphy->bands[ifibss->channel->band];
486
487 if (local->hw.conf.beacon_int == 0)
488 local->hw.conf.beacon_int = 100;
489 494
490 capability = WLAN_CAPABILITY_IBSS; 495 capability = WLAN_CAPABILITY_IBSS;
491 496
@@ -494,29 +499,20 @@ static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
494 else 499 else
495 sdata->drop_unencrypted = 0; 500 sdata->drop_unencrypted = 0;
496 501
497 pos = supp_rates; 502 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
498 for (i = 0; i < sband->n_bitrates; i++) { 503 ifibss->channel, 3, /* first two are basic */
499 int rate = sband->bitrates[i].bitrate; 504 capability, 0);
500 *pos++ = (u8) (rate / 5);
501 }
502
503 return __ieee80211_sta_join_ibss(sdata,
504 bssid, local->hw.conf.beacon_int,
505 local->hw.conf.channel->center_freq,
506 sband->n_bitrates, supp_rates,
507 capability, 0);
508} 505}
509 506
510static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) 507static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
511{ 508{
512 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 509 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
513 struct ieee80211_local *local = sdata->local; 510 struct ieee80211_local *local = sdata->local;
514 struct ieee80211_bss *bss; 511 struct ieee80211_bss *bss;
512 struct ieee80211_channel *chan = NULL;
515 const u8 *bssid = NULL; 513 const u8 *bssid = NULL;
516 int active_ibss; 514 int active_ibss;
517 515 u16 capability;
518 if (ifibss->ssid_len == 0)
519 return -EINVAL;
520 516
521 active_ibss = ieee80211_sta_active_ibss(sdata); 517 active_ibss = ieee80211_sta_active_ibss(sdata);
522#ifdef CONFIG_MAC80211_IBSS_DEBUG 518#ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -525,14 +521,23 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
525#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 521#endif /* CONFIG_MAC80211_IBSS_DEBUG */
526 522
527 if (active_ibss) 523 if (active_ibss)
528 return 0; 524 return;
529 525
530 if (ifibss->flags & IEEE80211_IBSS_BSSID_SET) 526 capability = WLAN_CAPABILITY_IBSS;
527 if (sdata->default_key)
528 capability |= WLAN_CAPABILITY_PRIVACY;
529
530 if (ifibss->fixed_bssid)
531 bssid = ifibss->bssid; 531 bssid = ifibss->bssid;
532 bss = (void *)cfg80211_get_bss(local->hw.wiphy, NULL, bssid, 532 if (ifibss->fixed_channel)
533 chan = ifibss->channel;
534 if (!is_zero_ether_addr(ifibss->bssid))
535 bssid = ifibss->bssid;
536 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid,
533 ifibss->ssid, ifibss->ssid_len, 537 ifibss->ssid, ifibss->ssid_len,
534 WLAN_CAPABILITY_IBSS, 538 WLAN_CAPABILITY_IBSS |
535 WLAN_CAPABILITY_IBSS); 539 WLAN_CAPABILITY_PRIVACY,
540 capability);
536 541
537#ifdef CONFIG_MAC80211_IBSS_DEBUG 542#ifdef CONFIG_MAC80211_IBSS_DEBUG
538 if (bss) 543 if (bss)
@@ -540,18 +545,14 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
540 "%pM\n", bss->cbss.bssid, ifibss->bssid); 545 "%pM\n", bss->cbss.bssid, ifibss->bssid);
541#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 546#endif /* CONFIG_MAC80211_IBSS_DEBUG */
542 547
543 if (bss && 548 if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
544 (!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET) ||
545 memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN))) {
546 int ret;
547
548 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 549 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
549 " based on configured SSID\n", 550 " based on configured SSID\n",
550 sdata->dev->name, bss->cbss.bssid); 551 sdata->dev->name, bss->cbss.bssid);
551 552
552 ret = ieee80211_sta_join_ibss(sdata, bss); 553 ieee80211_sta_join_ibss(sdata, bss);
553 ieee80211_rx_bss_put(local, bss); 554 ieee80211_rx_bss_put(local, bss);
554 return ret; 555 return;
555 } else if (bss) 556 } else if (bss)
556 ieee80211_rx_bss_put(local, bss); 557 ieee80211_rx_bss_put(local, bss);
557 558
@@ -562,29 +563,24 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
562 /* Selected IBSS not found in current scan results - try to scan */ 563 /* Selected IBSS not found in current scan results - try to scan */
563 if (ifibss->state == IEEE80211_IBSS_MLME_JOINED && 564 if (ifibss->state == IEEE80211_IBSS_MLME_JOINED &&
564 !ieee80211_sta_active_ibss(sdata)) { 565 !ieee80211_sta_active_ibss(sdata)) {
565 mod_timer(&ifibss->timer, jiffies + 566 mod_timer(&ifibss->timer,
566 IEEE80211_IBSS_MERGE_INTERVAL); 567 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
567 } else if (time_after(jiffies, local->last_scan_completed + 568 } else if (time_after(jiffies, ifibss->last_scan_completed +
568 IEEE80211_SCAN_INTERVAL)) { 569 IEEE80211_SCAN_INTERVAL)) {
569 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 570 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
570 "join\n", sdata->dev->name); 571 "join\n", sdata->dev->name);
571 572
572 /* XXX maybe racy? */ 573 ieee80211_request_internal_scan(sdata, ifibss->ssid,
573 if (local->scan_req) 574 ifibss->ssid_len);
574 return -EBUSY;
575
576 memcpy(local->int_scan_req.ssids[0].ssid,
577 ifibss->ssid, IEEE80211_MAX_SSID_LEN);
578 local->int_scan_req.ssids[0].ssid_len = ifibss->ssid_len;
579 return ieee80211_request_scan(sdata, &local->int_scan_req);
580 } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) { 575 } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) {
581 int interval = IEEE80211_SCAN_INTERVAL; 576 int interval = IEEE80211_SCAN_INTERVAL;
582 577
583 if (time_after(jiffies, ifibss->ibss_join_req + 578 if (time_after(jiffies, ifibss->ibss_join_req +
584 IEEE80211_IBSS_JOIN_TIMEOUT)) { 579 IEEE80211_IBSS_JOIN_TIMEOUT)) {
585 if (!(local->oper_channel->flags & 580 if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) {
586 IEEE80211_CHAN_NO_IBSS)) 581 ieee80211_sta_create_ibss(sdata);
587 return ieee80211_sta_create_ibss(sdata); 582 return;
583 }
588 printk(KERN_DEBUG "%s: IBSS not allowed on" 584 printk(KERN_DEBUG "%s: IBSS not allowed on"
589 " %d MHz\n", sdata->dev->name, 585 " %d MHz\n", sdata->dev->name,
590 local->hw.conf.channel->center_freq); 586 local->hw.conf.channel->center_freq);
@@ -595,11 +591,9 @@ static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
595 } 591 }
596 592
597 ifibss->state = IEEE80211_IBSS_MLME_SEARCH; 593 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
598 mod_timer(&ifibss->timer, jiffies + interval); 594 mod_timer(&ifibss->timer,
599 return 0; 595 round_jiffies(jiffies + interval));
600 } 596 }
601
602 return 0;
603} 597}
604 598
605static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, 599static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -614,13 +608,10 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
614 u8 *pos, *end; 608 u8 *pos, *end;
615 609
616 if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || 610 if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
617 len < 24 + 2 || !ifibss->probe_resp) 611 len < 24 + 2 || !ifibss->presp)
618 return; 612 return;
619 613
620 if (local->ops->tx_last_beacon) 614 tx_last_beacon = drv_tx_last_beacon(local);
621 tx_last_beacon = local->ops->tx_last_beacon(local_to_hw(local));
622 else
623 tx_last_beacon = 1;
624 615
625#ifdef CONFIG_MAC80211_IBSS_DEBUG 616#ifdef CONFIG_MAC80211_IBSS_DEBUG
626 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 617 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
@@ -649,13 +640,13 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
649 } 640 }
650 if (pos[1] != 0 && 641 if (pos[1] != 0 &&
651 (pos[1] != ifibss->ssid_len || 642 (pos[1] != ifibss->ssid_len ||
 652      memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len) != 0)) { 643      memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
653 /* Ignore ProbeReq for foreign SSID */ 644 /* Ignore ProbeReq for foreign SSID */
654 return; 645 return;
655 } 646 }
656 647
657 /* Reply with ProbeResp */ 648 /* Reply with ProbeResp */
658 skb = skb_copy(ifibss->probe_resp, GFP_KERNEL); 649 skb = skb_copy(ifibss->presp, GFP_KERNEL);
659 if (!skb) 650 if (!skb)
660 return; 651 return;
661 652
@@ -746,6 +737,9 @@ static void ieee80211_ibss_work(struct work_struct *work)
746 struct ieee80211_if_ibss *ifibss; 737 struct ieee80211_if_ibss *ifibss;
747 struct sk_buff *skb; 738 struct sk_buff *skb;
748 739
740 if (WARN_ON(local->suspended))
741 return;
742
749 if (!netif_running(sdata->dev)) 743 if (!netif_running(sdata->dev))
750 return; 744 return;
751 745
@@ -782,101 +776,63 @@ static void ieee80211_ibss_timer(unsigned long data)
782 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 776 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
783 struct ieee80211_local *local = sdata->local; 777 struct ieee80211_local *local = sdata->local;
784 778
779 if (local->quiescing) {
780 ifibss->timer_running = true;
781 return;
782 }
783
785 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); 784 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
786 queue_work(local->hw.workqueue, &ifibss->work); 785 queue_work(local->hw.workqueue, &ifibss->work);
787} 786}
788 787
789void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) 788#ifdef CONFIG_PM
789void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
790{ 790{
791 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 791 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
792 792
793 INIT_WORK(&ifibss->work, ieee80211_ibss_work); 793 cancel_work_sync(&ifibss->work);
794 setup_timer(&ifibss->timer, ieee80211_ibss_timer, 794 if (del_timer_sync(&ifibss->timer))
795 (unsigned long) sdata); 795 ifibss->timer_running = true;
796 skb_queue_head_init(&ifibss->skb_queue);
797
798 ifibss->flags |= IEEE80211_IBSS_AUTO_BSSID_SEL |
799 IEEE80211_IBSS_AUTO_CHANNEL_SEL;
800} 796}
801 797
802int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata) 798void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata)
803{ 799{
804 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 800 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
805 801
806 ifibss->flags &= ~IEEE80211_IBSS_PREV_BSSID_SET; 802 if (ifibss->timer_running) {
807 803 add_timer(&ifibss->timer);
808 if (ifibss->ssid_len) 804 ifibss->timer_running = false;
809 ifibss->flags |= IEEE80211_IBSS_SSID_SET;
810 else
811 ifibss->flags &= ~IEEE80211_IBSS_SSID_SET;
812
813 ifibss->ibss_join_req = jiffies;
814 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
815 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
816
817 return 0;
818}
819
820int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
821{
822 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
823
824 if (len > IEEE80211_MAX_SSID_LEN)
825 return -EINVAL;
826
827 if (ifibss->ssid_len != len || memcmp(ifibss->ssid, ssid, len) != 0) {
828 memset(ifibss->ssid, 0, sizeof(ifibss->ssid));
829 memcpy(ifibss->ssid, ssid, len);
830 ifibss->ssid_len = len;
831 } 805 }
832
833 return ieee80211_ibss_commit(sdata);
834}
835
836int ieee80211_ibss_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
837{
838 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
839
840 memcpy(ssid, ifibss->ssid, ifibss->ssid_len);
841 *len = ifibss->ssid_len;
842
843 return 0;
844} 806}
807#endif
845 808
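
The CONFIG_PM block just above is the IBSS half of the suspend/resume support added in this series: ieee80211_ibss_quiesce() cancels the work item and stops the timer, recording in timer_running whether it was still pending, and ieee80211_ibss_restart() re-arms the timer only in that case. The timer handler itself (earlier in this file) checks local->quiescing and just sets timer_running instead of queueing work, so a timer that fires mid-suspend is not lost. A user-space sketch of the bookkeeping, with a trivial stand-in for the kernel timer API:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_timer { bool pending; };

    /* Stand-in for del_timer_sync(): stop the timer, report if it was pending. */
    static bool fake_del_timer_sync(struct fake_timer *t)
    {
        bool was_pending = t->pending;

        t->pending = false;
        return was_pending;
    }

    static void fake_add_timer(struct fake_timer *t) { t->pending = true; }

    struct ibss_state {
        struct fake_timer timer;
        bool timer_running;     /* only meaningful across quiesce/restart */
    };

    static void quiesce(struct ibss_state *s)
    {
        if (fake_del_timer_sync(&s->timer))
            s->timer_running = true;    /* remember to re-arm on resume */
    }

    static void restart(struct ibss_state *s)
    {
        if (s->timer_running) {
            fake_add_timer(&s->timer);
            s->timer_running = false;
        }
    }

    int main(void)
    {
        struct ibss_state s = { { true }, false };

        quiesce(&s);
        printf("suspended: pending=%d saved=%d\n", s.timer.pending, s.timer_running);
        restart(&s);
        printf("resumed:   pending=%d saved=%d\n", s.timer.pending, s.timer_running);
        return 0;
    }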
846int ieee80211_ibss_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) 809void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
847{ 810{
848 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 811 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
849 812
850 if (is_valid_ether_addr(bssid)) { 813 INIT_WORK(&ifibss->work, ieee80211_ibss_work);
851 memcpy(ifibss->bssid, bssid, ETH_ALEN); 814 setup_timer(&ifibss->timer, ieee80211_ibss_timer,
852 ifibss->flags |= IEEE80211_IBSS_BSSID_SET; 815 (unsigned long) sdata);
853 } else { 816 skb_queue_head_init(&ifibss->skb_queue);
854 memset(ifibss->bssid, 0, ETH_ALEN);
855 ifibss->flags &= ~IEEE80211_IBSS_BSSID_SET;
856 }
857
858 if (netif_running(sdata->dev)) {
859 if (ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID)) {
860 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
861 "the low-level driver\n", sdata->dev->name);
862 }
863 }
864
865 return ieee80211_ibss_commit(sdata);
866} 817}
867 818
868/* scan finished notification */ 819/* scan finished notification */
869void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) 820void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
870{ 821{
871 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 822 struct ieee80211_sub_if_data *sdata;
872 struct ieee80211_if_ibss *ifibss; 823
873 824 mutex_lock(&local->iflist_mtx);
874 if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) { 825 list_for_each_entry(sdata, &local->interfaces, list) {
875 ifibss = &sdata->u.ibss; 826 if (!netif_running(sdata->dev))
876 if ((!(ifibss->flags & IEEE80211_IBSS_PREV_BSSID_SET)) || 827 continue;
877 !ieee80211_sta_active_ibss(sdata)) 828 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
878 ieee80211_sta_find_ibss(sdata); 829 continue;
830 if (!sdata->u.ibss.ssid_len)
831 continue;
832 sdata->u.ibss.last_scan_completed = jiffies;
833 ieee80211_sta_find_ibss(sdata);
879 } 834 }
835 mutex_unlock(&local->iflist_mtx);
880} 836}
881 837
882ieee80211_rx_result 838ieee80211_rx_result
@@ -906,3 +862,86 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
906 862
907 return RX_DROP_MONITOR; 863 return RX_DROP_MONITOR;
908} 864}
865
866int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
867 struct cfg80211_ibss_params *params)
868{
869 struct sk_buff *skb;
870
871 if (params->bssid) {
872 memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
873 sdata->u.ibss.fixed_bssid = true;
874 } else
875 sdata->u.ibss.fixed_bssid = false;
876
877 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
878
879 sdata->u.ibss.channel = params->channel;
880 sdata->u.ibss.fixed_channel = params->channel_fixed;
881
882 if (params->ie) {
883 sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
884 GFP_KERNEL);
885 if (sdata->u.ibss.ie)
886 sdata->u.ibss.ie_len = params->ie_len;
887 }
888
889 skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
890 36 /* bitrates */ +
891 34 /* SSID */ +
892 3 /* DS params */ +
893 4 /* IBSS params */ +
894 params->ie_len);
895 if (!skb)
896 return -ENOMEM;
897
898 sdata->u.ibss.skb = skb;
899 sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
900 sdata->u.ibss.ibss_join_req = jiffies;
901
902 memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
903
904 /*
905 * The ssid_len setting below is used to see whether
906 * we are active, and we need all other settings
907 * before that may get visible.
908 */
909 mb();
910
911 sdata->u.ibss.ssid_len = params->ssid_len;
912
913 ieee80211_recalc_idle(sdata->local);
914
915 set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
916 queue_work(sdata->local->hw.workqueue, &sdata->u.ibss.work);
917
918 return 0;
919}
920
921int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
922{
923 struct sk_buff *skb;
924
925 del_timer_sync(&sdata->u.ibss.timer);
926 clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
927 cancel_work_sync(&sdata->u.ibss.work);
928 clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
929
930 sta_info_flush(sdata->local, sdata);
931
932 /* remove beacon */
933 kfree(sdata->u.ibss.ie);
934 skb = sdata->u.ibss.presp;
935 rcu_assign_pointer(sdata->u.ibss.presp, NULL);
936 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
937 synchronize_rcu();
938 kfree_skb(skb);
939
940 skb_queue_purge(&sdata->u.ibss.skb_queue);
941 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
942 sdata->u.ibss.ssid_len = 0;
943
944 ieee80211_recalc_idle(sdata->local);
945
946 return 0;
947}
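
ieee80211_ibss_join() is the new cfg80211 entry point for this file. It fills in every configuration field (BSSID, beacon interval, channel, extra IEs, the preallocated presp buffer) before setting ssid_len, with an mb() in between, because the rest of the code treats a non-zero ssid_len as "this interface has an IBSS configured" (see ieee80211_ibss_notify_scan_completed() above); the barrier keeps a reader from observing ssid_len != 0 while the other fields are still stale. ieee80211_ibss_leave() tears things down in the opposite order and unpublishes the probe-response template with rcu_assign_pointer(..., NULL) plus synchronize_rcu() before freeing it. A minimal sketch of the publish side, using C11 release/acquire in place of the kernel barrier (purely illustrative, not the kernel's memory model):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct ibss_cfg {
        char ssid[32];
        int beacon_int;
        _Atomic size_t ssid_len;    /* 0 means "not configured" */
    };

    /* Writer: fill in everything, then make it visible by storing ssid_len
     * last with release semantics (the role mb() plays in the join path). */
    static void publish(struct ibss_cfg *cfg, const char *ssid, int beacon_int)
    {
        size_t len = strlen(ssid);

        memcpy(cfg->ssid, ssid, len);
        cfg->beacon_int = beacon_int;
        atomic_store_explicit(&cfg->ssid_len, len, memory_order_release);
    }

    /* Reader: a non-zero ssid_len (acquire) guarantees the earlier writes
     * are visible too. */
    static int configured(struct ibss_cfg *cfg)
    {
        return atomic_load_explicit(&cfg->ssid_len, memory_order_acquire) != 0;
    }

    int main(void)
    {
        static struct ibss_cfg cfg;     /* zero-initialized */

        publish(&cfg, "test-ibss", 100);
        if (configured(&cfg))
            printf("joined %s, beacon_int=%d\n", cfg.ssid, cfg.beacon_int);
        return 0;
    }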
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index e6ed78cb16b3..c088c46704a3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,7 +24,6 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
26#include <net/cfg80211.h> 26#include <net/cfg80211.h>
27#include <net/wireless.h>
28#include <net/iw_handler.h> 27#include <net/iw_handler.h>
29#include <net/mac80211.h> 28#include <net/mac80211.h>
30#include "key.h" 29#include "key.h"
@@ -236,7 +235,7 @@ struct mesh_preq_queue {
236#define IEEE80211_STA_ASSOCIATED BIT(4) 235#define IEEE80211_STA_ASSOCIATED BIT(4)
237#define IEEE80211_STA_PROBEREQ_POLL BIT(5) 236#define IEEE80211_STA_PROBEREQ_POLL BIT(5)
238#define IEEE80211_STA_CREATE_IBSS BIT(6) 237#define IEEE80211_STA_CREATE_IBSS BIT(6)
239/* hole at 7, please re-use */ 238#define IEEE80211_STA_CONTROL_PORT BIT(7)
240#define IEEE80211_STA_WMM_ENABLED BIT(8) 239#define IEEE80211_STA_WMM_ENABLED BIT(8)
241/* hole at 9, please re-use */ 240/* hole at 9, please re-use */
242#define IEEE80211_STA_AUTO_SSID_SEL BIT(10) 241#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
@@ -249,9 +248,8 @@ struct mesh_preq_queue {
249#define IEEE80211_STA_EXT_SME BIT(17) 248#define IEEE80211_STA_EXT_SME BIT(17)
250/* flags for MLME request */ 249/* flags for MLME request */
251#define IEEE80211_STA_REQ_SCAN 0 250#define IEEE80211_STA_REQ_SCAN 0
252#define IEEE80211_STA_REQ_DIRECT_PROBE 1 251#define IEEE80211_STA_REQ_AUTH 1
253#define IEEE80211_STA_REQ_AUTH 2 252#define IEEE80211_STA_REQ_RUN 2
254#define IEEE80211_STA_REQ_RUN 3
255 253
256/* bitfield of allowed auth algs */ 254/* bitfield of allowed auth algs */
257#define IEEE80211_AUTH_ALG_OPEN BIT(0) 255#define IEEE80211_AUTH_ALG_OPEN BIT(0)
@@ -295,6 +293,9 @@ struct ieee80211_if_managed {
295 int auth_tries; /* retries for auth req */ 293 int auth_tries; /* retries for auth req */
296 int assoc_tries; /* retries for assoc req */ 294 int assoc_tries; /* retries for assoc req */
297 295
296 unsigned long timers_running; /* used for quiesce/restart */
297 bool powersave; /* powersave requested for this iface */
298
298 unsigned long request; 299 unsigned long request;
299 300
300 unsigned long last_probe; 301 unsigned long last_probe;
@@ -306,6 +307,8 @@ struct ieee80211_if_managed {
306 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ 307 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
307 int auth_transaction; 308 int auth_transaction;
308 309
310 u32 beacon_crc;
311
309 enum { 312 enum {
310 IEEE80211_MFP_DISABLED, 313 IEEE80211_MFP_DISABLED,
311 IEEE80211_MFP_OPTIONAL, 314 IEEE80211_MFP_OPTIONAL,
@@ -319,14 +322,6 @@ struct ieee80211_if_managed {
319 size_t sme_auth_ie_len; 322 size_t sme_auth_ie_len;
320}; 323};
321 324
322enum ieee80211_ibss_flags {
323 IEEE80211_IBSS_AUTO_CHANNEL_SEL = BIT(0),
324 IEEE80211_IBSS_AUTO_BSSID_SEL = BIT(1),
325 IEEE80211_IBSS_BSSID_SET = BIT(2),
326 IEEE80211_IBSS_PREV_BSSID_SET = BIT(3),
327 IEEE80211_IBSS_SSID_SET = BIT(4),
328};
329
330enum ieee80211_ibss_request { 325enum ieee80211_ibss_request {
331 IEEE80211_IBSS_REQ_RUN = 0, 326 IEEE80211_IBSS_REQ_RUN = 0,
332}; 327};
@@ -337,17 +332,23 @@ struct ieee80211_if_ibss {
337 332
338 struct sk_buff_head skb_queue; 333 struct sk_buff_head skb_queue;
339 334
340 u8 ssid[IEEE80211_MAX_SSID_LEN]; 335 unsigned long request;
341 u8 ssid_len; 336 unsigned long last_scan_completed;
342 337
343 u32 flags; 338 bool timer_running;
344 339
345 u8 bssid[ETH_ALEN]; 340 bool fixed_bssid;
341 bool fixed_channel;
346 342
347 unsigned long request; 343 u8 bssid[ETH_ALEN];
344 u8 ssid[IEEE80211_MAX_SSID_LEN];
345 u8 ssid_len, ie_len;
346 u8 *ie;
347 struct ieee80211_channel *channel;
348 348
349 unsigned long ibss_join_req; 349 unsigned long ibss_join_req;
350 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ 350 /* probe response/beacon for IBSS */
351 struct sk_buff *presp, *skb;
351 352
352 enum { 353 enum {
353 IEEE80211_IBSS_MLME_SEARCH, 354 IEEE80211_IBSS_MLME_SEARCH,
@@ -361,6 +362,8 @@ struct ieee80211_if_mesh {
361 struct timer_list mesh_path_timer; 362 struct timer_list mesh_path_timer;
362 struct sk_buff_head skb_queue; 363 struct sk_buff_head skb_queue;
363 364
365 unsigned long timers_running;
366
364 bool housekeeping; 367 bool housekeeping;
365 368
366 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 369 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
@@ -430,6 +433,12 @@ struct ieee80211_sub_if_data {
430 433
431 int drop_unencrypted; 434 int drop_unencrypted;
432 435
436 /*
437 * keep track of whether the HT opmode (stored in
438 * vif.bss_info.ht_operation_mode) is valid.
439 */
440 bool ht_opmode_valid;
441
433 /* Fragment table for host-based reassembly */ 442 /* Fragment table for host-based reassembly */
434 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 443 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
435 unsigned int fragment_next; 444 unsigned int fragment_next;
@@ -606,6 +615,21 @@ struct ieee80211_local {
606 unsigned int filter_flags; /* FIF_* */ 615 unsigned int filter_flags; /* FIF_* */
607 struct iw_statistics wstats; 616 struct iw_statistics wstats;
608 bool tim_in_locked_section; /* see ieee80211_beacon_get() */ 617 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
618
619 /*
620 * suspended is true if we finished all the suspend _and_ we have
621 * not yet come up from resume. This is to be used by mac80211
622 * to ensure driver sanity during suspend and mac80211's own
623 * sanity. It can eventually be used for WoW as well.
624 */
625 bool suspended;
626
627 /*
628 * quiescing is true during the suspend process _only_ to
629 * ease timer cancelling etc.
630 */
631 bool quiescing;
632
609 int tx_headroom; /* required headroom for hardware/radiotap */ 633 int tx_headroom; /* required headroom for hardware/radiotap */
610 634
611 /* Tasklet and skb queue to process calls from IRQ mode. All frames 635 /* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -626,8 +650,6 @@ struct ieee80211_local {
626 spinlock_t sta_lock; 650 spinlock_t sta_lock;
627 unsigned long num_sta; 651 unsigned long num_sta;
628 struct list_head sta_list; 652 struct list_head sta_list;
629 struct list_head sta_flush_list;
630 struct work_struct sta_flush_work;
631 struct sta_info *sta_hash[STA_HASH_SIZE]; 653 struct sta_info *sta_hash[STA_HASH_SIZE];
632 struct timer_list sta_cleanup; 654 struct timer_list sta_cleanup;
633 655
@@ -647,9 +669,6 @@ struct ieee80211_local {
647 669
648 struct rate_control_ref *rate_ctrl; 670 struct rate_control_ref *rate_ctrl;
649 671
650 int rts_threshold;
651 int fragmentation_threshold;
652
653 struct crypto_blkcipher *wep_tx_tfm; 672 struct crypto_blkcipher *wep_tx_tfm;
654 struct crypto_blkcipher *wep_rx_tfm; 673 struct crypto_blkcipher *wep_rx_tfm;
655 u32 wep_iv; 674 u32 wep_iv;
@@ -666,15 +685,18 @@ struct ieee80211_local {
666 685
667 686
668 /* Scanning and BSS list */ 687 /* Scanning and BSS list */
688 struct mutex scan_mtx;
669 bool sw_scanning, hw_scanning; 689 bool sw_scanning, hw_scanning;
670 struct cfg80211_ssid scan_ssid; 690 struct cfg80211_ssid scan_ssid;
671 struct cfg80211_scan_request int_scan_req; 691 struct cfg80211_scan_request int_scan_req;
672 struct cfg80211_scan_request *scan_req; 692 struct cfg80211_scan_request *scan_req;
673 struct ieee80211_channel *scan_channel; 693 struct ieee80211_channel *scan_channel;
694 const u8 *orig_ies;
695 int orig_ies_len;
674 int scan_channel_idx; 696 int scan_channel_idx;
697 int scan_ies_len;
675 698
676 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 699 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
677 unsigned long last_scan_completed;
678 struct delayed_work scan_work; 700 struct delayed_work scan_work;
679 struct ieee80211_sub_if_data *scan_sdata; 701 struct ieee80211_sub_if_data *scan_sdata;
680 enum nl80211_channel_type oper_channel_type; 702 enum nl80211_channel_type oper_channel_type;
@@ -736,28 +758,32 @@ struct ieee80211_local {
736 int wifi_wme_noack_test; 758 int wifi_wme_noack_test;
737 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 759 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
738 760
739 bool powersave;
740 bool pspolling; 761 bool pspolling;
762 /*
763 * PS can only be enabled when we have exactly one managed
764 * interface (and monitors) in PS, this then points there.
765 */
766 struct ieee80211_sub_if_data *ps_sdata;
741 struct work_struct dynamic_ps_enable_work; 767 struct work_struct dynamic_ps_enable_work;
742 struct work_struct dynamic_ps_disable_work; 768 struct work_struct dynamic_ps_disable_work;
743 struct timer_list dynamic_ps_timer; 769 struct timer_list dynamic_ps_timer;
770 struct notifier_block network_latency_notifier;
744 771
745 int user_power_level; /* in dBm */ 772 int user_power_level; /* in dBm */
746 int power_constr_level; /* in dBm */ 773 int power_constr_level; /* in dBm */
747 774
775 struct work_struct restart_work;
776
748#ifdef CONFIG_MAC80211_DEBUGFS 777#ifdef CONFIG_MAC80211_DEBUGFS
749 struct local_debugfsdentries { 778 struct local_debugfsdentries {
750 struct dentry *rcdir; 779 struct dentry *rcdir;
751 struct dentry *rcname; 780 struct dentry *rcname;
752 struct dentry *frequency; 781 struct dentry *frequency;
753 struct dentry *rts_threshold;
754 struct dentry *fragmentation_threshold;
755 struct dentry *short_retry_limit;
756 struct dentry *long_retry_limit;
757 struct dentry *total_ps_buffered; 782 struct dentry *total_ps_buffered;
758 struct dentry *wep_iv; 783 struct dentry *wep_iv;
759 struct dentry *tsf; 784 struct dentry *tsf;
760 struct dentry *reset; 785 struct dentry *reset;
786 struct dentry *noack;
761 struct dentry *statistics; 787 struct dentry *statistics;
762 struct local_debugfsdentries_statsdentries { 788 struct local_debugfsdentries_statsdentries {
763 struct dentry *transmitted_fragment_count; 789 struct dentry *transmitted_fragment_count;
@@ -830,7 +856,7 @@ struct ieee802_11_elems {
830 u8 *fh_params; 856 u8 *fh_params;
831 u8 *ds_params; 857 u8 *ds_params;
832 u8 *cf_params; 858 u8 *cf_params;
833 u8 *tim; 859 struct ieee80211_tim_ie *tim;
834 u8 *ibss_params; 860 u8 *ibss_params;
835 u8 *challenge; 861 u8 *challenge;
836 u8 *wpa; 862 u8 *wpa;
@@ -903,7 +929,6 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
903 929
904 930
905int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); 931int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
906int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
907void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); 932void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
908void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 933void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
909 u32 changed); 934 u32 changed);
@@ -927,12 +952,16 @@ int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason
927int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason); 952int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
928void ieee80211_send_pspoll(struct ieee80211_local *local, 953void ieee80211_send_pspoll(struct ieee80211_local *local,
929 struct ieee80211_sub_if_data *sdata); 954 struct ieee80211_sub_if_data *sdata);
955void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
956int ieee80211_max_network_latency(struct notifier_block *nb,
957 unsigned long data, void *dummy);
958void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
959 struct ieee80211_channel_sw_ie *sw_elem,
960 struct ieee80211_bss *bss);
961void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
962void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
930 963
931/* IBSS code */ 964/* IBSS code */
932int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata);
933int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
934int ieee80211_ibss_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
935int ieee80211_ibss_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
936void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 965void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
937void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); 966void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
938ieee80211_rx_result 967ieee80211_rx_result
@@ -940,14 +969,22 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
940 struct ieee80211_rx_status *rx_status); 969 struct ieee80211_rx_status *rx_status);
941struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 970struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
942 u8 *bssid, u8 *addr, u32 supp_rates); 971 u8 *bssid, u8 *addr, u32 supp_rates);
972int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
973 struct cfg80211_ibss_params *params);
974int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
975void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
976void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
943 977
944/* scan/BSS handling */ 978/* scan/BSS handling */
945void ieee80211_scan_work(struct work_struct *work); 979void ieee80211_scan_work(struct work_struct *work);
980int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
981 const u8 *ssid, u8 ssid_len);
946int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 982int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
947 struct cfg80211_scan_request *req); 983 struct cfg80211_scan_request *req);
948int ieee80211_scan_results(struct ieee80211_local *local, 984int ieee80211_scan_results(struct ieee80211_local *local,
949 struct iw_request_info *info, 985 struct iw_request_info *info,
950 char *buf, size_t len); 986 char *buf, size_t len);
987void ieee80211_scan_cancel(struct ieee80211_local *local);
951ieee80211_rx_result 988ieee80211_rx_result
952ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, 989ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
953 struct sk_buff *skb, 990 struct sk_buff *skb,
@@ -956,9 +993,6 @@ int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
956 const char *ie, size_t len); 993 const char *ie, size_t len);
957 994
958void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); 995void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
959void ieee80211_scan_failed(struct ieee80211_local *local);
960int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
961 struct cfg80211_scan_request *req);
962struct ieee80211_bss * 996struct ieee80211_bss *
963ieee80211_bss_info_update(struct ieee80211_local *local, 997ieee80211_bss_info_update(struct ieee80211_local *local,
964 struct ieee80211_rx_status *rx_status, 998 struct ieee80211_rx_status *rx_status,
@@ -983,6 +1017,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
983 enum nl80211_iftype type); 1017 enum nl80211_iftype type);
984void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); 1018void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
985void ieee80211_remove_interfaces(struct ieee80211_local *local); 1019void ieee80211_remove_interfaces(struct ieee80211_local *local);
1020u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
1021void ieee80211_recalc_idle(struct ieee80211_local *local);
986 1022
987/* tx handling */ 1023/* tx handling */
988void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1024void ieee80211_clear_tx_pending(struct ieee80211_local *local);
@@ -995,9 +1031,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
995void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 1031void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
996 struct ieee80211_ht_cap *ht_cap_ie, 1032 struct ieee80211_ht_cap *ht_cap_ie,
997 struct ieee80211_sta_ht_cap *ht_cap); 1033 struct ieee80211_sta_ht_cap *ht_cap);
998u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
999 struct ieee80211_ht_info *hti,
1000 u16 ap_ht_cap_flags);
1001void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); 1034void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
1002void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1035void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
1003 const u8 *da, u16 tid, 1036 const u8 *da, u16 tid,
@@ -1027,24 +1060,23 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1027void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1060void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1028 struct ieee80211_mgmt *mgmt, 1061 struct ieee80211_mgmt *mgmt,
1029 size_t len); 1062 size_t len);
1030void ieee80211_chswitch_timer(unsigned long data); 1063
1031void ieee80211_chswitch_work(struct work_struct *work); 1064/* Suspend/resume and hw reconfiguration */
1032void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1065int ieee80211_reconfig(struct ieee80211_local *local);
1033 struct ieee80211_channel_sw_ie *sw_elem, 1066
1034 struct ieee80211_bss *bss);
1035void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1036 u16 capab_info, u8 *pwr_constr_elem,
1037 u8 pwr_constr_elem_len);
1038
1039/* Suspend/resume */
1040#ifdef CONFIG_PM 1067#ifdef CONFIG_PM
1041int __ieee80211_suspend(struct ieee80211_hw *hw); 1068int __ieee80211_suspend(struct ieee80211_hw *hw);
1042int __ieee80211_resume(struct ieee80211_hw *hw); 1069
1070static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1071{
1072 return ieee80211_reconfig(hw_to_local(hw));
1073}
1043#else 1074#else
1044static inline int __ieee80211_suspend(struct ieee80211_hw *hw) 1075static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
1045{ 1076{
1046 return 0; 1077 return 0;
1047} 1078}
1079
1048static inline int __ieee80211_resume(struct ieee80211_hw *hw) 1080static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1049{ 1081{
1050 return 0; 1082 return 0;
@@ -1053,19 +1085,20 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1053 1085
1054/* utility functions/constants */ 1086/* utility functions/constants */
1055extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1087extern void *mac80211_wiphy_privid; /* for wiphy privid */
1056extern const unsigned char rfc1042_header[6];
1057extern const unsigned char bridge_tunnel_header[6];
1058u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 1088u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
1059 enum nl80211_iftype type); 1089 enum nl80211_iftype type);
1060int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1090int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
1061 int rate, int erp, int short_preamble); 1091 int rate, int erp, int short_preamble);
1062void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, 1092void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
1063 struct ieee80211_hdr *hdr); 1093 struct ieee80211_hdr *hdr, const u8 *tsc);
1064void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); 1094void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
1065void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, 1095void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1066 int encrypt); 1096 int encrypt);
1067void ieee802_11_parse_elems(u8 *start, size_t len, 1097void ieee802_11_parse_elems(u8 *start, size_t len,
1068 struct ieee802_11_elems *elems); 1098 struct ieee802_11_elems *elems);
1099u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
1100 struct ieee802_11_elems *elems,
1101 u64 filter, u32 crc);
1069int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); 1102int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
1070u32 ieee80211_mandatory_rates(struct ieee80211_local *local, 1103u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
1071 enum ieee80211_band band); 1104 enum ieee80211_band band);
@@ -1093,9 +1126,11 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1093 u16 transaction, u16 auth_alg, 1126 u16 transaction, u16 auth_alg,
1094 u8 *extra, size_t extra_len, 1127 u8 *extra, size_t extra_len,
1095 const u8 *bssid, int encrypt); 1128 const u8 *bssid, int encrypt);
1129int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1130 const u8 *ie, size_t ie_len);
1096void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1131void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1097 u8 *ssid, size_t ssid_len, 1132 const u8 *ssid, size_t ssid_len,
1098 u8 *ie, size_t ie_len); 1133 const u8 *ie, size_t ie_len);
1099 1134
1100void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1135void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1101 const size_t supp_rates_len, 1136 const size_t supp_rates_len,
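
Two of the ieee80211_i.h additions above work together: ieee802_11_parse_elems_crc(), with its filter and crc arguments, and the new beacon_crc field in struct ieee80211_if_managed. They let the managed-mode code compute a checksum over only the information elements it actually consumes and skip reprocessing a beacon whose filtered elements have not changed since the previous one. A standalone sketch of that skip-if-unchanged idea, hashing selected IEs with zlib's crc32(); the element walk and the filter-by-element-ID scheme are simplified compared to the real parser (build with -lz):

    #include <stdio.h>
    #include <zlib.h>

    /* Hash only the "interesting" information elements of a beacon body.
     * Each IE is: id (1 byte), length (1 byte), data (length bytes). */
    static unsigned long filtered_beacon_crc(const unsigned char *ies, size_t len,
                                             unsigned long long filter)
    {
        unsigned long crc = crc32(0L, Z_NULL, 0);
        size_t pos = 0;

        while (pos + 2 <= len && pos + 2 + ies[pos + 1] <= len) {
            unsigned char id = ies[pos];
            unsigned char elen = ies[pos + 1];

            if (id < 64 && (filter & (1ULL << id)))
                crc = crc32(crc, ies + pos, 2 + elen);
            pos += 2 + elen;
        }
        return crc;
    }

    int main(void)
    {
        /* SSID "ab" (id 0) followed by DS params (id 3), channel 6 vs 11 */
        unsigned char b1[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x06 };
        unsigned char b2[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x0b };
        unsigned long long filter = 1ULL << 0;  /* only track the SSID element */

        /* same CRC: the channel changed, but no filtered element did */
        printf("%lu %lu\n", filtered_beacon_crc(b1, sizeof(b1), filter),
                            filtered_beacon_crc(b2, sizeof(b2), filter));
        return 0;
    }

A matching beacon_crc stored per interface is then enough to decide whether a newly received beacon needs full processing.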
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 91e8e1bacaaa..8c9f1c722cdb 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -20,6 +20,7 @@
20#include "debugfs_netdev.h" 20#include "debugfs_netdev.h"
21#include "mesh.h" 21#include "mesh.h"
22#include "led.h" 22#include "led.h"
23#include "driver-ops.h"
23 24
24/** 25/**
25 * DOC: Interface list locking 26 * DOC: Interface list locking
@@ -164,9 +165,7 @@ static int ieee80211_open(struct net_device *dev)
164 } 165 }
165 166
166 if (local->open_count == 0) { 167 if (local->open_count == 0) {
167 res = 0; 168 res = drv_start(local);
168 if (local->ops->start)
169 res = local->ops->start(local_to_hw(local));
170 if (res) 169 if (res)
171 goto err_del_bss; 170 goto err_del_bss;
172 /* we're brought up, everything changes */ 171 /* we're brought up, everything changes */
@@ -199,8 +198,8 @@ static int ieee80211_open(struct net_device *dev)
199 * Validate the MAC address for this device. 198 * Validate the MAC address for this device.
200 */ 199 */
201 if (!is_valid_ether_addr(dev->dev_addr)) { 200 if (!is_valid_ether_addr(dev->dev_addr)) {
202 if (!local->open_count && local->ops->stop) 201 if (!local->open_count)
203 local->ops->stop(local_to_hw(local)); 202 drv_stop(local);
204 return -EADDRNOTAVAIL; 203 return -EADDRNOTAVAIL;
205 } 204 }
206 205
@@ -235,17 +234,13 @@ static int ieee80211_open(struct net_device *dev)
235 netif_addr_unlock_bh(local->mdev); 234 netif_addr_unlock_bh(local->mdev);
236 break; 235 break;
237 case NL80211_IFTYPE_STATION: 236 case NL80211_IFTYPE_STATION:
238 case NL80211_IFTYPE_ADHOC: 237 sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
239 if (sdata->vif.type == NL80211_IFTYPE_STATION)
240 sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
241 else
242 sdata->u.ibss.flags &= ~IEEE80211_IBSS_PREV_BSSID_SET;
243 /* fall through */ 238 /* fall through */
244 default: 239 default:
245 conf.vif = &sdata->vif; 240 conf.vif = &sdata->vif;
246 conf.type = sdata->vif.type; 241 conf.type = sdata->vif.type;
247 conf.mac_addr = dev->dev_addr; 242 conf.mac_addr = dev->dev_addr;
248 res = local->ops->add_interface(local_to_hw(local), &conf); 243 res = drv_add_interface(local, &conf);
249 if (res) 244 if (res)
250 goto err_stop; 245 goto err_stop;
251 246
@@ -306,6 +301,8 @@ static int ieee80211_open(struct net_device *dev)
306 if (sdata->flags & IEEE80211_SDATA_PROMISC) 301 if (sdata->flags & IEEE80211_SDATA_PROMISC)
307 atomic_inc(&local->iff_promiscs); 302 atomic_inc(&local->iff_promiscs);
308 303
304 hw_reconf_flags |= __ieee80211_recalc_idle(local);
305
309 local->open_count++; 306 local->open_count++;
310 if (hw_reconf_flags) { 307 if (hw_reconf_flags) {
311 ieee80211_hw_config(local, hw_reconf_flags); 308 ieee80211_hw_config(local, hw_reconf_flags);
@@ -317,6 +314,8 @@ static int ieee80211_open(struct net_device *dev)
317 ieee80211_set_wmm_default(sdata); 314 ieee80211_set_wmm_default(sdata);
318 } 315 }
319 316
317 ieee80211_recalc_ps(local, -1);
318
320 /* 319 /*
321 * ieee80211_sta_work is disabled while network interface 320 * ieee80211_sta_work is disabled while network interface
322 * is down. Therefore, some configuration changes may not 321 * is down. Therefore, some configuration changes may not
@@ -325,17 +324,15 @@ static int ieee80211_open(struct net_device *dev)
325 */ 324 */
326 if (sdata->vif.type == NL80211_IFTYPE_STATION) 325 if (sdata->vif.type == NL80211_IFTYPE_STATION)
327 queue_work(local->hw.workqueue, &sdata->u.mgd.work); 326 queue_work(local->hw.workqueue, &sdata->u.mgd.work);
328 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
329 queue_work(local->hw.workqueue, &sdata->u.ibss.work);
330 327
331 netif_tx_start_all_queues(dev); 328 netif_tx_start_all_queues(dev);
332 329
333 return 0; 330 return 0;
334 err_del_interface: 331 err_del_interface:
335 local->ops->remove_interface(local_to_hw(local), &conf); 332 drv_remove_interface(local, &conf);
336 err_stop: 333 err_stop:
337 if (!local->open_count && local->ops->stop) 334 if (!local->open_count)
338 local->ops->stop(local_to_hw(local)); 335 drv_stop(local);
339 err_del_bss: 336 err_del_bss:
340 sdata->bss = NULL; 337 sdata->bss = NULL;
341 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 338 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -497,7 +494,6 @@ static int ieee80211_stop(struct net_device *dev)
497 /* fall through */ 494 /* fall through */
498 case NL80211_IFTYPE_ADHOC: 495 case NL80211_IFTYPE_ADHOC:
499 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 496 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
500 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
501 del_timer_sync(&sdata->u.ibss.timer); 497 del_timer_sync(&sdata->u.ibss.timer);
502 cancel_work_sync(&sdata->u.ibss.work); 498 cancel_work_sync(&sdata->u.ibss.work);
503 synchronize_rcu(); 499 synchronize_rcu();
@@ -549,17 +545,20 @@ static int ieee80211_stop(struct net_device *dev)
549 conf.mac_addr = dev->dev_addr; 545 conf.mac_addr = dev->dev_addr;
550 /* disable all keys for as long as this netdev is down */ 546 /* disable all keys for as long as this netdev is down */
551 ieee80211_disable_keys(sdata); 547 ieee80211_disable_keys(sdata);
552 local->ops->remove_interface(local_to_hw(local), &conf); 548 drv_remove_interface(local, &conf);
553 } 549 }
554 550
555 sdata->bss = NULL; 551 sdata->bss = NULL;
556 552
553 hw_reconf_flags |= __ieee80211_recalc_idle(local);
554
555 ieee80211_recalc_ps(local, -1);
556
557 if (local->open_count == 0) { 557 if (local->open_count == 0) {
558 if (netif_running(local->mdev)) 558 if (netif_running(local->mdev))
559 dev_close(local->mdev); 559 dev_close(local->mdev);
560 560
561 if (local->ops->stop) 561 drv_stop(local);
562 local->ops->stop(local_to_hw(local));
563 562
564 ieee80211_led_radio(local, 0); 563 ieee80211_led_radio(local, 0);
565 564
@@ -649,7 +648,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
649 mesh_rmc_free(sdata); 648 mesh_rmc_free(sdata);
650 break; 649 break;
651 case NL80211_IFTYPE_ADHOC: 650 case NL80211_IFTYPE_ADHOC:
652 kfree_skb(sdata->u.ibss.probe_resp); 651 if (WARN_ON(sdata->u.ibss.presp))
652 kfree_skb(sdata->u.ibss.presp);
653 break; 653 break;
654 case NL80211_IFTYPE_STATION: 654 case NL80211_IFTYPE_STATION:
655 kfree(sdata->u.mgd.extra_ie); 655 kfree(sdata->u.mgd.extra_ie);
@@ -896,3 +896,74 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
896 unregister_netdevice(sdata->dev); 896 unregister_netdevice(sdata->dev);
897 } 897 }
898} 898}
899
900static u32 ieee80211_idle_off(struct ieee80211_local *local,
901 const char *reason)
902{
903 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
904 return 0;
905
906#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
907 printk(KERN_DEBUG "%s: device no longer idle - %s\n",
908 wiphy_name(local->hw.wiphy), reason);
909#endif
910
911 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
912 return IEEE80211_CONF_CHANGE_IDLE;
913}
914
915static u32 ieee80211_idle_on(struct ieee80211_local *local)
916{
917 if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
918 return 0;
919
920#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
921 printk(KERN_DEBUG "%s: device now idle\n",
922 wiphy_name(local->hw.wiphy));
923#endif
924
925 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
926 return IEEE80211_CONF_CHANGE_IDLE;
927}
928
929u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
930{
931 struct ieee80211_sub_if_data *sdata;
932 int count = 0;
933
934 if (local->hw_scanning || local->sw_scanning)
935 return ieee80211_idle_off(local, "scanning");
936
937 list_for_each_entry(sdata, &local->interfaces, list) {
938 if (!netif_running(sdata->dev))
939 continue;
940 /* do not count disabled managed interfaces */
941 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
942 sdata->u.mgd.state == IEEE80211_STA_MLME_DISABLED)
943 continue;
944 /* do not count unused IBSS interfaces */
945 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
946 !sdata->u.ibss.ssid_len)
947 continue;
948 /* count everything else */
949 count++;
950 }
951
952 if (!count)
953 return ieee80211_idle_on(local);
954 else
955 return ieee80211_idle_off(local, "in use");
956
957 return 0;
958}
959
960void ieee80211_recalc_idle(struct ieee80211_local *local)
961{
962 u32 chg;
963
964 mutex_lock(&local->iflist_mtx);
965 chg = __ieee80211_recalc_idle(local);
966 mutex_unlock(&local->iflist_mtx);
967 if (chg)
968 ieee80211_hw_config(local, chg);
969}
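
The helpers above only flip IEEE80211_CONF_IDLE in hw.conf.flags and hand back IEEE80211_CONF_CHANGE_IDLE so that ieee80211_hw_config() forwards the transition to the driver. A minimal sketch of how a driver's config callback might consume the new notification; my_dev, my_rf_sleep() and my_rf_wake() are hypothetical names, not mac80211 symbols:

	static int my_config(struct ieee80211_hw *hw, u32 changed)
	{
		struct my_dev *dev = hw->priv;	/* driver-private area */

		if (changed & IEEE80211_CONF_CHANGE_IDLE) {
			if (hw->conf.flags & IEEE80211_CONF_IDLE)
				my_rf_sleep(dev);	/* no interface needs the radio */
			else
				my_rf_wake(dev);	/* a scan or association is coming up */
		}

		return 0;
	}
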
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 687acf23054d..ce267565e180 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -16,6 +16,7 @@
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "driver-ops.h"
19#include "debugfs_key.h" 20#include "debugfs_key.h"
20#include "aes_ccm.h" 21#include "aes_ccm.h"
21#include "aes_cmac.h" 22#include "aes_cmac.h"
@@ -136,8 +137,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
136 struct ieee80211_sub_if_data, 137 struct ieee80211_sub_if_data,
137 u.ap); 138 u.ap);
138 139
139 ret = key->local->ops->set_key(local_to_hw(key->local), SET_KEY, 140 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
140 &sdata->vif, sta, &key->conf);
141 141
142 if (!ret) { 142 if (!ret) {
143 spin_lock(&todo_lock); 143 spin_lock(&todo_lock);
@@ -179,8 +179,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
179 struct ieee80211_sub_if_data, 179 struct ieee80211_sub_if_data,
180 u.ap); 180 u.ap);
181 181
182 ret = key->local->ops->set_key(local_to_hw(key->local), DISABLE_KEY, 182 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif,
183 &sdata->vif, sta, &key->conf); 183 sta, &key->conf);
184 184
185 if (ret) 185 if (ret)
186 printk(KERN_ERR "mac80211-%s: failed to remove key " 186 printk(KERN_ERR "mac80211-%s: failed to remove key "
@@ -290,9 +290,11 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
290struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 290struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
291 int idx, 291 int idx,
292 size_t key_len, 292 size_t key_len,
293 const u8 *key_data) 293 const u8 *key_data,
294 size_t seq_len, const u8 *seq)
294{ 295{
295 struct ieee80211_key *key; 296 struct ieee80211_key *key;
297 int i, j;
296 298
297 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); 299 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
298 300
@@ -318,14 +320,31 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
318 case ALG_TKIP: 320 case ALG_TKIP:
319 key->conf.iv_len = TKIP_IV_LEN; 321 key->conf.iv_len = TKIP_IV_LEN;
320 key->conf.icv_len = TKIP_ICV_LEN; 322 key->conf.icv_len = TKIP_ICV_LEN;
323 if (seq) {
324 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
325 key->u.tkip.rx[i].iv32 =
326 get_unaligned_le32(&seq[2]);
327 key->u.tkip.rx[i].iv16 =
328 get_unaligned_le16(seq);
329 }
330 }
321 break; 331 break;
322 case ALG_CCMP: 332 case ALG_CCMP:
323 key->conf.iv_len = CCMP_HDR_LEN; 333 key->conf.iv_len = CCMP_HDR_LEN;
324 key->conf.icv_len = CCMP_MIC_LEN; 334 key->conf.icv_len = CCMP_MIC_LEN;
335 if (seq) {
336 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
337 for (j = 0; j < CCMP_PN_LEN; j++)
338 key->u.ccmp.rx_pn[i][j] =
339 seq[CCMP_PN_LEN - j - 1];
340 }
325 break; 341 break;
326 case ALG_AES_CMAC: 342 case ALG_AES_CMAC:
327 key->conf.iv_len = 0; 343 key->conf.iv_len = 0;
328 key->conf.icv_len = sizeof(struct ieee80211_mmie); 344 key->conf.icv_len = sizeof(struct ieee80211_mmie);
345 if (seq)
346 for (j = 0; j < 6; j++)
347 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
329 break; 348 break;
330 } 349 }
331 memcpy(key->conf.key, key_data, key_len); 350 memcpy(key->conf.key, key_data, key_len);
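
The new seq argument hands the key's receive sequence counter to mac80211 in the byte order it is transmitted, so TKIP splits it into iv16/iv32 with the little-endian accessors while CCMP and AES-CMAC store the same bytes most-significant first. A standalone sketch of that mapping, worked with a made-up counter (user-space C, CCMP_PN_LEN redefined locally):

	#include <stdio.h>
	#include <stdint.h>

	#define CCMP_PN_LEN 6

	int main(void)
	{
		const uint8_t seq[CCMP_PN_LEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
		uint8_t rx_pn[CCMP_PN_LEN];
		int j;

		/* TKIP: low 16 bits first, then the high 32 bits, both little-endian */
		uint16_t iv16 = (uint16_t)seq[0] | ((uint16_t)seq[1] << 8);
		uint32_t iv32 = (uint32_t)seq[2] | ((uint32_t)seq[3] << 8) |
				((uint32_t)seq[4] << 16) | ((uint32_t)seq[5] << 24);

		/* CCMP/AES-CMAC: same bytes, stored most-significant first */
		for (j = 0; j < CCMP_PN_LEN; j++)
			rx_pn[j] = seq[CCMP_PN_LEN - j - 1];

		printf("iv16=0x%04x iv32=0x%08x\n", iv16, iv32);	/* 0x0201, 0x06050403 */
		printf("rx_pn=%02x%02x%02x%02x%02x%02x\n", rx_pn[0], rx_pn[1],
		       rx_pn[2], rx_pn[3], rx_pn[4], rx_pn[5]);		/* 060504030201 */
		return 0;
	}
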
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 215d3ef42a4f..9572e00f532c 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -144,7 +144,8 @@ struct ieee80211_key {
144struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 144struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
145 int idx, 145 int idx,
146 size_t key_len, 146 size_t key_len,
147 const u8 *key_data); 147 const u8 *key_data,
148 size_t seq_len, const u8 *seq);
148/* 149/*
149 * Insert a key into data structures (sdata, sta if necessary) 150 * Insert a key into data structures (sdata, sta if necessary)
150 * to make it used, free old key. 151 * to make it used, free old key.
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 14134193cd17..e37770ced53c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -21,10 +21,12 @@
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
23#include <linux/bitmap.h> 23#include <linux/bitmap.h>
24#include <linux/pm_qos_params.h>
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/cfg80211.h> 26#include <net/cfg80211.h>
26 27
27#include "ieee80211_i.h" 28#include "ieee80211_i.h"
29#include "driver-ops.h"
28#include "rate.h" 30#include "rate.h"
29#include "mesh.h" 31#include "mesh.h"
30#include "wep.h" 32#include "wep.h"
@@ -80,10 +82,9 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
80 /* be a bit nasty */ 82 /* be a bit nasty */
81 new_flags |= (1<<31); 83 new_flags |= (1<<31);
82 84
83 local->ops->configure_filter(local_to_hw(local), 85 drv_configure_filter(local, changed_flags, &new_flags,
84 changed_flags, &new_flags, 86 local->mdev->mc_count,
85 local->mdev->mc_count, 87 local->mdev->mc_list);
86 local->mdev->mc_list);
87 88
88 WARN_ON(new_flags & (1<<31)); 89 WARN_ON(new_flags & (1<<31));
89 90
@@ -151,93 +152,19 @@ static void ieee80211_master_set_multicast_list(struct net_device *dev)
151 ieee80211_configure_filter(local); 152 ieee80211_configure_filter(local);
152} 153}
153 154
154/* everything else */
155
156int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
157{
158 struct ieee80211_local *local = sdata->local;
159 struct ieee80211_if_conf conf;
160
161 if (WARN_ON(!netif_running(sdata->dev)))
162 return 0;
163
164 memset(&conf, 0, sizeof(conf));
165
166 if (sdata->vif.type == NL80211_IFTYPE_STATION)
167 conf.bssid = sdata->u.mgd.bssid;
168 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
169 conf.bssid = sdata->u.ibss.bssid;
170 else if (sdata->vif.type == NL80211_IFTYPE_AP)
171 conf.bssid = sdata->dev->dev_addr;
172 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
173 static const u8 zero[ETH_ALEN] = { 0 };
174 conf.bssid = zero;
175 } else {
176 WARN_ON(1);
177 return -EINVAL;
178 }
179
180 if (!local->ops->config_interface)
181 return 0;
182
183 switch (sdata->vif.type) {
184 case NL80211_IFTYPE_AP:
185 case NL80211_IFTYPE_ADHOC:
186 case NL80211_IFTYPE_MESH_POINT:
187 break;
188 default:
189 /* do not warn to simplify caller in scan.c */
190 changed &= ~IEEE80211_IFCC_BEACON_ENABLED;
191 if (WARN_ON(changed & IEEE80211_IFCC_BEACON))
192 return -EINVAL;
193 changed &= ~IEEE80211_IFCC_BEACON;
194 break;
195 }
196
197 if (changed & IEEE80211_IFCC_BEACON_ENABLED) {
198 if (local->sw_scanning) {
199 conf.enable_beacon = false;
200 } else {
201 /*
202 * Beacon should be enabled, but AP mode must
203 * check whether there is a beacon configured.
204 */
205 switch (sdata->vif.type) {
206 case NL80211_IFTYPE_AP:
207 conf.enable_beacon =
208 !!rcu_dereference(sdata->u.ap.beacon);
209 break;
210 case NL80211_IFTYPE_ADHOC:
211 conf.enable_beacon = !!sdata->u.ibss.probe_resp;
212 break;
213 case NL80211_IFTYPE_MESH_POINT:
214 conf.enable_beacon = true;
215 break;
216 default:
217 /* not reached */
218 WARN_ON(1);
219 break;
220 }
221 }
222 }
223
224 conf.changed = changed;
225
226 return local->ops->config_interface(local_to_hw(local),
227 &sdata->vif, &conf);
228}
229
230int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) 155int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
231{ 156{
232 struct ieee80211_channel *chan; 157 struct ieee80211_channel *chan, *scan_chan;
233 int ret = 0; 158 int ret = 0;
234 int power; 159 int power;
235 enum nl80211_channel_type channel_type; 160 enum nl80211_channel_type channel_type;
236 161
237 might_sleep(); 162 might_sleep();
238 163
239 if (local->sw_scanning) { 164 scan_chan = local->scan_channel;
240 chan = local->scan_channel; 165
166 if (scan_chan) {
167 chan = scan_chan;
241 channel_type = NL80211_CHAN_NO_HT; 168 channel_type = NL80211_CHAN_NO_HT;
242 } else { 169 } else {
243 chan = local->oper_channel; 170 chan = local->oper_channel;
@@ -251,7 +178,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
251 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 178 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
252 } 179 }
253 180
254 if (local->sw_scanning) 181 if (scan_chan)
255 power = chan->max_power; 182 power = chan->max_power;
256 else 183 else
257 power = local->power_constr_level ? 184 power = local->power_constr_level ?
@@ -267,7 +194,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
267 } 194 }
268 195
269 if (changed && local->open_count) { 196 if (changed && local->open_count) {
270 ret = local->ops->config(local_to_hw(local), changed); 197 ret = drv_config(local, changed);
271 /* 198 /*
272 * Goal: 199 * Goal:
273 * HW reconfiguration should never fail, the driver has told 200 * HW reconfiguration should never fail, the driver has told
@@ -292,18 +219,86 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
292 u32 changed) 219 u32 changed)
293{ 220{
294 struct ieee80211_local *local = sdata->local; 221 struct ieee80211_local *local = sdata->local;
222 static const u8 zero[ETH_ALEN] = { 0 };
295 223
296 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 224 if (!changed)
297 return; 225 return;
298 226
299 if (!changed) 227 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
228 /*
229 * While not associated, claim a BSSID of all-zeroes
230 * so that drivers don't do any weird things with the
231 * BSSID at that time.
232 */
233 if (sdata->vif.bss_conf.assoc)
234 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
235 else
236 sdata->vif.bss_conf.bssid = zero;
237 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
238 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
239 else if (sdata->vif.type == NL80211_IFTYPE_AP)
240 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr;
241 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
242 sdata->vif.bss_conf.bssid = zero;
243 } else {
244 WARN_ON(1);
300 return; 245 return;
246 }
247
248 switch (sdata->vif.type) {
249 case NL80211_IFTYPE_AP:
250 case NL80211_IFTYPE_ADHOC:
251 case NL80211_IFTYPE_MESH_POINT:
252 break;
253 default:
254 /* do not warn to simplify caller in scan.c */
255 changed &= ~BSS_CHANGED_BEACON_ENABLED;
256 if (WARN_ON(changed & BSS_CHANGED_BEACON))
257 return;
258 break;
259 }
260
261 if (changed & BSS_CHANGED_BEACON_ENABLED) {
262 if (local->sw_scanning) {
263 sdata->vif.bss_conf.enable_beacon = false;
264 } else {
265 /*
266 * Beacon should be enabled, but AP mode must
267 * check whether there is a beacon configured.
268 */
269 switch (sdata->vif.type) {
270 case NL80211_IFTYPE_AP:
271 sdata->vif.bss_conf.enable_beacon =
272 !!rcu_dereference(sdata->u.ap.beacon);
273 break;
274 case NL80211_IFTYPE_ADHOC:
275 sdata->vif.bss_conf.enable_beacon =
276 !!rcu_dereference(sdata->u.ibss.presp);
277 break;
278 case NL80211_IFTYPE_MESH_POINT:
279 sdata->vif.bss_conf.enable_beacon = true;
280 break;
281 default:
282 /* not reached */
283 WARN_ON(1);
284 break;
285 }
286 }
287 }
288
289 drv_bss_info_changed(local, &sdata->vif,
290 &sdata->vif.bss_conf, changed);
301 291
302 if (local->ops->bss_info_changed) 292 /*
303 local->ops->bss_info_changed(local_to_hw(local), 293 * DEPRECATED
304 &sdata->vif, 294 *
305 &sdata->vif.bss_conf, 295 * ~changed is just there to not do this at resume time
306 changed); 296 */
297 if (changed & BSS_CHANGED_BEACON_INT && ~changed) {
298 local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
299 ieee80211_hw_config(local,
300 _IEEE80211_CONF_CHANGE_BEACON_INTERVAL);
301 }
307} 302}
308 303
309u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 304u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -696,6 +691,28 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
696} 691}
697EXPORT_SYMBOL(ieee80211_tx_status); 692EXPORT_SYMBOL(ieee80211_tx_status);
698 693
694static void ieee80211_restart_work(struct work_struct *work)
695{
696 struct ieee80211_local *local =
697 container_of(work, struct ieee80211_local, restart_work);
698
699 rtnl_lock();
700 ieee80211_reconfig(local);
701 rtnl_unlock();
702}
703
704void ieee80211_restart_hw(struct ieee80211_hw *hw)
705{
706 struct ieee80211_local *local = hw_to_local(hw);
707
708 /* use this reason, __ieee80211_resume will unblock it */
709 ieee80211_stop_queues_by_reason(hw,
710 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
711
712 schedule_work(&local->restart_work);
713}
714EXPORT_SYMBOL(ieee80211_restart_hw);
715
699struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 716struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
700 const struct ieee80211_ops *ops) 717 const struct ieee80211_ops *ops)
701{ 718{
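
ieee80211_restart_hw() is the driver-facing half of the restart_work added above: it freezes the TX queues right away with the suspend reason and defers the heavy lifting to ieee80211_reconfig(), which later runs under the RTNL to bring the device back up (drv_start(), interfaces, keys). A hedged sketch of a caller; my_priv and my_fw_is_dead() are hypothetical:

	static void my_error_handler(struct my_priv *priv)
	{
		if (!my_fw_is_dead(priv))
			return;

		/* queues stop now; the actual reinit happens later in
		 * process context via restart_work -> ieee80211_reconfig() */
		ieee80211_restart_hw(priv->hw);
	}
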
@@ -718,9 +735,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
718 * +-------------------------+ 735 * +-------------------------+
719 * 736 *
720 */ 737 */
721 priv_size = ((sizeof(struct ieee80211_local) + 738 priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
722 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
723 priv_data_len;
724 739
725 wiphy = wiphy_new(&mac80211_config_ops, priv_size); 740 wiphy = wiphy_new(&mac80211_config_ops, priv_size);
726 741
@@ -728,17 +743,16 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
728 return NULL; 743 return NULL;
729 744
730 wiphy->privid = mac80211_wiphy_privid; 745 wiphy->privid = mac80211_wiphy_privid;
731 wiphy->max_scan_ssids = 4; 746
732 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 747 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
733 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) - 748 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
734 sizeof(struct cfg80211_bss); 749 sizeof(struct cfg80211_bss);
735 750
736 local = wiphy_priv(wiphy); 751 local = wiphy_priv(wiphy);
752
737 local->hw.wiphy = wiphy; 753 local->hw.wiphy = wiphy;
738 754
739 local->hw.priv = (char *)local + 755 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
740 ((sizeof(struct ieee80211_local) +
741 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
742 756
743 BUG_ON(!ops->tx); 757 BUG_ON(!ops->tx);
744 BUG_ON(!ops->start); 758 BUG_ON(!ops->start);
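
The open-coded mask arithmetic being removed is exactly what ALIGN() expands to, with NETDEV_ALIGN_CONST == NETDEV_ALIGN - 1; both round the structure size up to the next multiple of the (power-of-two) alignment so that hw.priv starts on a NETDEV_ALIGN boundary. A quick worked check with illustrative numbers (32-byte alignment, 1000-byte struct):

	#include <stdio.h>

	/* same shape as the kernel's ALIGN(); a must be a power of two */
	#define MY_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long sz = 1000;	/* stand-in for sizeof(struct ieee80211_local) */
		unsigned long align = 32;	/* stand-in for NETDEV_ALIGN */

		/* old form: (sz + (align - 1)) & ~(align - 1)  ->  1024
		 * new form: ALIGN(sz, align)                   ->  1024 */
		printf("%lu\n", MY_ALIGN(sz, align));
		return 0;
	}
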
@@ -752,15 +766,14 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
752 /* set up some defaults */ 766 /* set up some defaults */
753 local->hw.queues = 1; 767 local->hw.queues = 1;
754 local->hw.max_rates = 1; 768 local->hw.max_rates = 1;
755 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 769 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
756 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 770 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
757 local->hw.conf.long_frame_max_tx_count = 4;
758 local->hw.conf.short_frame_max_tx_count = 7;
759 local->hw.conf.radio_enabled = true; 771 local->hw.conf.radio_enabled = true;
760 local->user_power_level = -1; 772 local->user_power_level = -1;
761 773
762 INIT_LIST_HEAD(&local->interfaces); 774 INIT_LIST_HEAD(&local->interfaces);
763 mutex_init(&local->iflist_mtx); 775 mutex_init(&local->iflist_mtx);
776 mutex_init(&local->scan_mtx);
764 777
765 spin_lock_init(&local->key_lock); 778 spin_lock_init(&local->key_lock);
766 779
@@ -768,6 +781,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
768 781
769 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 782 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
770 783
784 INIT_WORK(&local->restart_work, ieee80211_restart_work);
785
771 INIT_WORK(&local->dynamic_ps_enable_work, 786 INIT_WORK(&local->dynamic_ps_enable_work,
772 ieee80211_dynamic_ps_enable_work); 787 ieee80211_dynamic_ps_enable_work);
773 INIT_WORK(&local->dynamic_ps_disable_work, 788 INIT_WORK(&local->dynamic_ps_disable_work,
@@ -821,7 +836,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
821 enum ieee80211_band band; 836 enum ieee80211_band band;
822 struct net_device *mdev; 837 struct net_device *mdev;
823 struct ieee80211_master_priv *mpriv; 838 struct ieee80211_master_priv *mpriv;
824 int channels, i, j; 839 int channels, i, j, max_bitrates;
840 bool supp_ht;
841 static const u32 cipher_suites[] = {
842 WLAN_CIPHER_SUITE_WEP40,
843 WLAN_CIPHER_SUITE_WEP104,
844 WLAN_CIPHER_SUITE_TKIP,
845 WLAN_CIPHER_SUITE_CCMP,
846
847 /* keep last -- depends on hw flags! */
848 WLAN_CIPHER_SUITE_AES_CMAC
849 };
825 850
826 /* 851 /*
827 * generic code guarantees at least one band, 852 * generic code guarantees at least one band,
@@ -829,18 +854,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
829 * that hw.conf.channel is assigned 854 * that hw.conf.channel is assigned
830 */ 855 */
831 channels = 0; 856 channels = 0;
857 max_bitrates = 0;
858 supp_ht = false;
832 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 859 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
833 struct ieee80211_supported_band *sband; 860 struct ieee80211_supported_band *sband;
834 861
835 sband = local->hw.wiphy->bands[band]; 862 sband = local->hw.wiphy->bands[band];
836 if (sband && !local->oper_channel) { 863 if (!sband)
864 continue;
865 if (!local->oper_channel) {
837 /* init channel we're on */ 866 /* init channel we're on */
838 local->hw.conf.channel = 867 local->hw.conf.channel =
839 local->oper_channel = 868 local->oper_channel = &sband->channels[0];
840 local->scan_channel = &sband->channels[0]; 869 local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
841 } 870 }
842 if (sband) 871 channels += sband->n_channels;
843 channels += sband->n_channels; 872
873 if (max_bitrates < sband->n_bitrates)
874 max_bitrates = sband->n_bitrates;
875 supp_ht = supp_ht || sband->ht_cap.ht_supported;
844 } 876 }
845 877
846 local->int_scan_req.n_channels = channels; 878 local->int_scan_req.n_channels = channels;
@@ -860,6 +892,37 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
860 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 892 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
861 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 893 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
862 894
895 /*
896 * Calculate scan IE length -- we need this to alloc
897 * memory and to subtract from the driver limit. It
898 * includes the (extended) supported rates and HT
899 * information -- SSID is the driver's responsibility.
900 */
901 local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */
902 if (supp_ht)
903 local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
904
905 if (!local->ops->hw_scan) {
906 /* For hw_scan, driver needs to set these up. */
907 local->hw.wiphy->max_scan_ssids = 4;
908 local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
909 }
910
911 /*
912 * If the driver supports any scan IEs, then assume the
913 * limit includes the IEs mac80211 will add, otherwise
914 * leave it at zero and let the driver sort it out; we
915 * still pass our IEs to the driver but userspace will
916 * not be allowed to in that case.
917 */
918 if (local->hw.wiphy->max_scan_ie_len)
919 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
920
921 local->hw.wiphy->cipher_suites = cipher_suites;
922 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
923 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
924 local->hw.wiphy->n_cipher_suites--;
925
863 result = wiphy_register(local->hw.wiphy); 926 result = wiphy_register(local->hw.wiphy);
864 if (result < 0) 927 if (result < 0)
865 goto fail_wiphy_register; 928 goto fail_wiphy_register;
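
A worked example of the budget being computed here: the Supported Rates element carries at most eight rates behind a two-byte header and the remainder spills into Extended Supported Rates behind another two-byte header, so the rates cost at most 4 + max_bitrates octets; with, say, twelve bitrates and HT support (two-byte header plus the 26-byte HT capabilities body) scan_ies_len comes to 4 + 12 + 2 + 26 = 44, and a driver advertising max_scan_ie_len of 200 is trimmed to 156 octets of room for userspace-supplied scan IEs.
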
@@ -898,9 +961,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
898 961
899 debugfs_hw_add(local); 962 debugfs_hw_add(local);
900 963
901 if (local->hw.conf.beacon_int < 10)
902 local->hw.conf.beacon_int = 100;
903
904 if (local->hw.max_listen_interval == 0) 964 if (local->hw.max_listen_interval == 0)
905 local->hw.max_listen_interval = 1; 965 local->hw.max_listen_interval = 1;
906 966
@@ -965,25 +1025,38 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
965 } 1025 }
966 } 1026 }
967 1027
1028 local->network_latency_notifier.notifier_call =
1029 ieee80211_max_network_latency;
1030 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
1031 &local->network_latency_notifier);
1032
1033 if (result) {
1034 rtnl_lock();
1035 goto fail_pm_qos;
1036 }
1037
968 return 0; 1038 return 0;
969 1039
970fail_rate: 1040 fail_pm_qos:
1041 ieee80211_led_exit(local);
1042 ieee80211_remove_interfaces(local);
1043 fail_rate:
971 unregister_netdevice(local->mdev); 1044 unregister_netdevice(local->mdev);
972 local->mdev = NULL; 1045 local->mdev = NULL;
973fail_dev: 1046 fail_dev:
974 rtnl_unlock(); 1047 rtnl_unlock();
975 ieee80211_wep_free(local); 1048 ieee80211_wep_free(local);
976fail_wep: 1049 fail_wep:
977 sta_info_stop(local); 1050 sta_info_stop(local);
978fail_sta_info: 1051 fail_sta_info:
979 debugfs_hw_del(local); 1052 debugfs_hw_del(local);
980 destroy_workqueue(local->hw.workqueue); 1053 destroy_workqueue(local->hw.workqueue);
981fail_workqueue: 1054 fail_workqueue:
982 if (local->mdev) 1055 if (local->mdev)
983 free_netdev(local->mdev); 1056 free_netdev(local->mdev);
984fail_mdev_alloc: 1057 fail_mdev_alloc:
985 wiphy_unregister(local->hw.wiphy); 1058 wiphy_unregister(local->hw.wiphy);
986fail_wiphy_register: 1059 fail_wiphy_register:
987 kfree(local->int_scan_req.channels); 1060 kfree(local->int_scan_req.channels);
988 return result; 1061 return result;
989} 1062}
@@ -996,6 +1069,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
996 tasklet_kill(&local->tx_pending_tasklet); 1069 tasklet_kill(&local->tx_pending_tasklet);
997 tasklet_kill(&local->tasklet); 1070 tasklet_kill(&local->tasklet);
998 1071
1072 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
1073 &local->network_latency_notifier);
1074
999 rtnl_lock(); 1075 rtnl_lock();
1000 1076
1001 /* 1077 /*
@@ -1038,6 +1114,7 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
1038 struct ieee80211_local *local = hw_to_local(hw); 1114 struct ieee80211_local *local = hw_to_local(hw);
1039 1115
1040 mutex_destroy(&local->iflist_mtx); 1116 mutex_destroy(&local->iflist_mtx);
1117 mutex_destroy(&local->scan_mtx);
1041 1118
1042 wiphy_free(local->hw.wiphy); 1119 wiphy_free(local->hw.wiphy);
1043} 1120}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9a3e5de0410a..fc712e60705d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -21,6 +21,9 @@
21#define CAPAB_OFFSET 17 21#define CAPAB_OFFSET 17
22#define ACCEPT_PLINKS 0x80 22#define ACCEPT_PLINKS 0x80
23 23
24#define TMR_RUNNING_HK 0
25#define TMR_RUNNING_MP 1
26
24int mesh_allocated; 27int mesh_allocated;
25static struct kmem_cache *rm_cache; 28static struct kmem_cache *rm_cache;
26 29
@@ -45,6 +48,12 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
45 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 48 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
46 49
47 ifmsh->housekeeping = true; 50 ifmsh->housekeeping = true;
51
52 if (local->quiescing) {
53 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
54 return;
55 }
56
48 queue_work(local->hw.workqueue, &ifmsh->work); 57 queue_work(local->hw.workqueue, &ifmsh->work);
49} 58}
50 59
@@ -343,6 +352,11 @@ static void ieee80211_mesh_path_timer(unsigned long data)
343 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 352 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
344 struct ieee80211_local *local = sdata->local; 353 struct ieee80211_local *local = sdata->local;
345 354
355 if (local->quiescing) {
356 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
357 return;
358 }
359
346 queue_work(local->hw.workqueue, &ifmsh->work); 360 queue_work(local->hw.workqueue, &ifmsh->work);
347} 361}
348 362
@@ -417,13 +431,39 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
417 431
418 free_plinks = mesh_plink_availables(sdata); 432 free_plinks = mesh_plink_availables(sdata);
419 if (free_plinks != sdata->u.mesh.accepting_plinks) 433 if (free_plinks != sdata->u.mesh.accepting_plinks)
420 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 434 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
421 435
422 ifmsh->housekeeping = false; 436 ifmsh->housekeeping = false;
423 mod_timer(&ifmsh->housekeeping_timer, 437 mod_timer(&ifmsh->housekeeping_timer,
424 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 438 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
425} 439}
426 440
441#ifdef CONFIG_PM
442void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
443{
444 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
445
446 /* might restart the timer but that doesn't matter */
447 cancel_work_sync(&ifmsh->work);
448
449 /* use atomic bitops in case both timers fire at the same time */
450
451 if (del_timer_sync(&ifmsh->housekeeping_timer))
452 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
453 if (del_timer_sync(&ifmsh->mesh_path_timer))
454 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
455}
456
457void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
458{
459 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
460
461 if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
462 add_timer(&ifmsh->housekeeping_timer);
463 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
464 add_timer(&ifmsh->mesh_path_timer);
465}
466#endif
427 467
428void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) 468void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
429{ 469{
@@ -432,8 +472,8 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
432 472
433 ifmsh->housekeeping = true; 473 ifmsh->housekeeping = true;
434 queue_work(local->hw.workqueue, &ifmsh->work); 474 queue_work(local->hw.workqueue, &ifmsh->work);
435 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON | 475 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
436 IEEE80211_IFCC_BEACON_ENABLED); 476 BSS_CHANGED_BEACON_ENABLED);
437} 477}
438 478
439void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 479void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
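
The quiesce/restart pair leans on two facts: del_timer_sync() returns nonzero exactly when the timer was still pending, and the TMR_RUNNING_* flags must be set with atomic bitops because the timer callbacks themselves set them if they fire while local->quiescing is true. A stripped-down sketch of the same pattern; the my_* names are illustrative, not mac80211 symbols:

	#include <linux/timer.h>
	#include <linux/bitops.h>

	#define MY_TMR_RUNNING	0

	static struct timer_list my_timer;
	static unsigned long my_timers_running;

	static void my_quiesce(void)
	{
		/* nonzero return means the timer was pending when we killed it */
		if (del_timer_sync(&my_timer))
			set_bit(MY_TMR_RUNNING, &my_timers_running);
	}

	static void my_restart(void)
	{
		/* my_timer.expires still holds the old deadline */
		if (test_and_clear_bit(MY_TMR_RUNNING, &my_timers_running))
			add_timer(&my_timer);
	}
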
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index d891d7ddccd7..c7d72819cdd2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -191,12 +191,8 @@ struct mesh_rmc {
191#define PLINK_CATEGORY 30 191#define PLINK_CATEGORY 30
192#define MESH_PATH_SEL_CATEGORY 32 192#define MESH_PATH_SEL_CATEGORY 32
193 193
194/* Mesh Header Flags */
195#define IEEE80211S_FLAGS_AE 0x3
196
197/* Public interfaces */ 194/* Public interfaces */
198/* Various */ 195/* Various */
199int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
200int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 196int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
201 struct ieee80211_sub_if_data *sdata); 197 struct ieee80211_sub_if_data *sdata);
202int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 198int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
@@ -267,6 +263,8 @@ void mesh_path_timer(unsigned long data);
267void mesh_path_flush_by_nexthop(struct sta_info *sta); 263void mesh_path_flush_by_nexthop(struct sta_info *sta);
268void mesh_path_discard_frame(struct sk_buff *skb, 264void mesh_path_discard_frame(struct sk_buff *skb,
269 struct ieee80211_sub_if_data *sdata); 265 struct ieee80211_sub_if_data *sdata);
266void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
267void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
270 268
271#ifdef CONFIG_MAC80211_MESH 269#ifdef CONFIG_MAC80211_MESH
272extern int mesh_allocated; 270extern int mesh_allocated;
@@ -294,10 +292,20 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
294 292
295void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 293void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
296 294
295void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
296void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
297void mesh_plink_quiesce(struct sta_info *sta);
298void mesh_plink_restart(struct sta_info *sta);
297#else 299#else
298#define mesh_allocated 0 300#define mesh_allocated 0
299static inline void 301static inline void
300ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} 302ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
303static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
304{}
305static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
306{}
307static inline void mesh_plink_quiesce(struct sta_info *sta) {}
308static inline void mesh_plink_restart(struct sta_info *sta) {}
301#endif 309#endif
302 310
303#endif /* IEEE80211S_H */ 311#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 60b35accda91..003cb470ac84 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -836,8 +836,14 @@ void mesh_path_timer(unsigned long data)
836 mpath = rcu_dereference(mpath); 836 mpath = rcu_dereference(mpath);
837 if (!mpath) 837 if (!mpath)
838 goto endmpathtimer; 838 goto endmpathtimer;
839 spin_lock_bh(&mpath->state_lock);
840 sdata = mpath->sdata; 839 sdata = mpath->sdata;
840
841 if (sdata->local->quiescing) {
842 rcu_read_unlock();
843 return;
844 }
845
846 spin_lock_bh(&mpath->state_lock);
841 if (mpath->flags & MESH_PATH_RESOLVED || 847 if (mpath->flags & MESH_PATH_RESOLVED ||
842 (!(mpath->flags & MESH_PATH_RESOLVING))) 848 (!(mpath->flags & MESH_PATH_RESOLVING)))
843 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 849 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a8bbdeca013a..cb14253587f1 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -266,6 +266,11 @@ static void mesh_plink_timer(unsigned long data)
266 */ 266 */
267 sta = (struct sta_info *) data; 267 sta = (struct sta_info *) data;
268 268
269 if (sta->sdata->local->quiescing) {
270 sta->plink_timer_was_running = true;
271 return;
272 }
273
269 spin_lock_bh(&sta->lock); 274 spin_lock_bh(&sta->lock);
270 if (sta->ignore_plink_timer) { 275 if (sta->ignore_plink_timer) {
271 sta->ignore_plink_timer = false; 276 sta->ignore_plink_timer = false;
@@ -322,6 +327,22 @@ static void mesh_plink_timer(unsigned long data)
322 } 327 }
323} 328}
324 329
330#ifdef CONFIG_PM
331void mesh_plink_quiesce(struct sta_info *sta)
332{
333 if (del_timer_sync(&sta->plink_timer))
334 sta->plink_timer_was_running = true;
335}
336
337void mesh_plink_restart(struct sta_info *sta)
338{
339 if (sta->plink_timer_was_running) {
340 add_timer(&sta->plink_timer);
341 sta->plink_timer_was_running = false;
342 }
343}
344#endif
345
325static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) 346static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
326{ 347{
327 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); 348 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 132938b073dc..509469cb9265 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -17,10 +17,13 @@
17#include <linux/if_arp.h> 17#include <linux/if_arp.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h>
21#include <linux/crc32.h>
20#include <net/mac80211.h> 22#include <net/mac80211.h>
21#include <asm/unaligned.h> 23#include <asm/unaligned.h>
22 24
23#include "ieee80211_i.h" 25#include "ieee80211_i.h"
26#include "driver-ops.h"
24#include "rate.h" 27#include "rate.h"
25#include "led.h" 28#include "led.h"
26 29
@@ -30,9 +33,13 @@
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 33#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3 34#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 35#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
36#define IEEE80211_PROBE_WAIT (HZ / 5)
33#define IEEE80211_PROBE_IDLE_TIME (60 * HZ) 37#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
35 39
40#define TMR_RUNNING_TIMER 0
41#define TMR_RUNNING_CHANSW 1
42
36/* utils */ 43/* utils */
37static int ecw2cw(int ecw) 44static int ecw2cw(int ecw)
38{ 45{
@@ -80,6 +87,92 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
80 return count; 87 return count;
81} 88}
82 89
90/*
91 * ieee80211_enable_ht should be called only after the operating band
92 * has been determined as ht configuration depends on the hw's
93 * HT abilities for a specific band.
94 */
95static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
96 struct ieee80211_ht_info *hti,
97 u16 ap_ht_cap_flags)
98{
99 struct ieee80211_local *local = sdata->local;
100 struct ieee80211_supported_band *sband;
101 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
102 struct sta_info *sta;
103 u32 changed = 0;
104 u16 ht_opmode;
105 bool enable_ht = true, ht_changed;
106 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
107
108 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
109
110 /* HT is not supported */
111 if (!sband->ht_cap.ht_supported)
112 enable_ht = false;
113
114 /* check that channel matches the right operating channel */
115 if (local->hw.conf.channel->center_freq !=
116 ieee80211_channel_to_frequency(hti->control_chan))
117 enable_ht = false;
118
119 if (enable_ht) {
120 channel_type = NL80211_CHAN_HT20;
121
122 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
123 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
124 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
125 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
126 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
127 if (!(local->hw.conf.channel->flags &
128 IEEE80211_CHAN_NO_HT40PLUS))
129 channel_type = NL80211_CHAN_HT40PLUS;
130 break;
131 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
132 if (!(local->hw.conf.channel->flags &
133 IEEE80211_CHAN_NO_HT40MINUS))
134 channel_type = NL80211_CHAN_HT40MINUS;
135 break;
136 }
137 }
138 }
139
140 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
141 channel_type != local->hw.conf.channel_type;
142
143 local->oper_channel_type = channel_type;
144
145 if (ht_changed) {
146 /* channel_type change automatically detected */
147 ieee80211_hw_config(local, 0);
148
149 rcu_read_lock();
150
151 sta = sta_info_get(local, ifmgd->bssid);
152 if (sta)
153 rate_control_rate_update(local, sband, sta,
154 IEEE80211_RC_HT_CHANGED);
155
156 rcu_read_unlock();
157 }
158
159 /* disable HT */
160 if (!enable_ht)
161 return 0;
162
163 ht_opmode = le16_to_cpu(hti->operation_mode);
164
165 /* if bss configuration changed store the new one */
166 if (!sdata->ht_opmode_valid ||
167 sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
168 changed |= BSS_CHANGED_HT;
169 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
170 sdata->ht_opmode_valid = true;
171 }
172
173 return changed;
174}
175
83/* frame sending functions */ 176/* frame sending functions */
84 177
85static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 178static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
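
To make the HT40 selection above concrete: an AP whose HT information element advertises control channel 36 with the secondary channel "above" ends up as NL80211_CHAN_HT40PLUS (36 bonded with 40), but only if the AP is not 40 MHz intolerant, it permits a 40 MHz channel width, the local band reports IEEE80211_HT_CAP_SUP_WIDTH_20_40 and regulatory has not set IEEE80211_CHAN_NO_HT40PLUS on the channel; if any of those checks fail the code quietly falls back to NL80211_CHAN_HT20, and without HT support at all it stays at NL80211_CHAN_NO_HT and returns without touching the BSS HT operation mode.
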
@@ -263,13 +356,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
263 356
264 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 357 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
265 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 358 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
266 if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) { 359 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
267 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 360 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
268 cap &= ~IEEE80211_HT_CAP_SGI_40; 361 cap &= ~IEEE80211_HT_CAP_SGI_40;
269 } 362 }
270 break; 363 break;
271 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 364 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
272 if (flags & IEEE80211_CHAN_NO_FAT_BELOW) { 365 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
273 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 366 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
274 cap &= ~IEEE80211_HT_CAP_SGI_40; 367 cap &= ~IEEE80211_HT_CAP_SGI_40;
275 } 368 }
@@ -325,6 +418,10 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
325 /* u.deauth.reason_code == u.disassoc.reason_code */ 418 /* u.deauth.reason_code == u.disassoc.reason_code */
326 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 419 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
327 420
421 if (stype == IEEE80211_STYPE_DEAUTH)
422 cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, skb->len);
423 else
424 cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, skb->len);
328 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); 425 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
329} 426}
330 427
@@ -359,6 +456,278 @@ void ieee80211_send_pspoll(struct ieee80211_local *local,
359 ieee80211_tx_skb(sdata, skb, 0); 456 ieee80211_tx_skb(sdata, skb, 0);
360} 457}
361 458
459void ieee80211_send_nullfunc(struct ieee80211_local *local,
460 struct ieee80211_sub_if_data *sdata,
461 int powersave)
462{
463 struct sk_buff *skb;
464 struct ieee80211_hdr *nullfunc;
465 __le16 fc;
466
467 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
468 return;
469
470 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
471 if (!skb) {
472 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
473 "frame\n", sdata->dev->name);
474 return;
475 }
476 skb_reserve(skb, local->hw.extra_tx_headroom);
477
478 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
479 memset(nullfunc, 0, 24);
480 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
481 IEEE80211_FCTL_TODS);
482 if (powersave)
483 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
484 nullfunc->frame_control = fc;
485 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
486 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
487 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
488
489 ieee80211_tx_skb(sdata, skb, 0);
490}
491
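
The frame control value built here is worth spelling out: per include/linux/ieee80211.h, IEEE80211_FTYPE_DATA is 0x0008 and IEEE80211_STYPE_NULLFUNC is 0x0040, so together with IEEE80211_FCTL_TODS (0x0100) a plain nullfunc to the AP carries 0x0148, and the powersave variant ORs in IEEE80211_FCTL_PM (0x1000) for 0x1148, stored little-endian on the air; addr1 and addr3 are the BSSID and addr2 the station's own address, as in any to-DS data frame with no payload.
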
492/* spectrum management related things */
493static void ieee80211_chswitch_work(struct work_struct *work)
494{
495 struct ieee80211_sub_if_data *sdata =
496 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
497 struct ieee80211_bss *bss;
498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
499
500 if (!netif_running(sdata->dev))
501 return;
502
503 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
504 sdata->local->hw.conf.channel->center_freq,
505 ifmgd->ssid, ifmgd->ssid_len);
506 if (!bss)
507 goto exit;
508
509 sdata->local->oper_channel = sdata->local->csa_channel;
510 /* XXX: shouldn't really modify cfg80211-owned data! */
511 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
512 bss->cbss.channel = sdata->local->oper_channel;
513
514 ieee80211_rx_bss_put(sdata->local, bss);
515exit:
516 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
517 ieee80211_wake_queues_by_reason(&sdata->local->hw,
518 IEEE80211_QUEUE_STOP_REASON_CSA);
519}
520
521static void ieee80211_chswitch_timer(unsigned long data)
522{
523 struct ieee80211_sub_if_data *sdata =
524 (struct ieee80211_sub_if_data *) data;
525 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
526
527 if (sdata->local->quiescing) {
528 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
529 return;
530 }
531
532 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
533}
534
535void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
536 struct ieee80211_channel_sw_ie *sw_elem,
537 struct ieee80211_bss *bss)
538{
539 struct ieee80211_channel *new_ch;
540 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
541 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
542
543 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
544 return;
545
546 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
547 return;
548
549 /* Disregard subsequent beacons if we are already running a timer
550 processing a CSA */
551
552 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
553 return;
554
555 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
556 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
557 return;
558
559 sdata->local->csa_channel = new_ch;
560
561 if (sw_elem->count <= 1) {
562 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
563 } else {
564 ieee80211_stop_queues_by_reason(&sdata->local->hw,
565 IEEE80211_QUEUE_STOP_REASON_CSA);
566 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
567 mod_timer(&ifmgd->chswitch_timer,
568 jiffies +
569 msecs_to_jiffies(sw_elem->count *
570 bss->cbss.beacon_interval));
571 }
572}
573
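
For the timer arming above, a worked case: a channel switch count of 5 with a beacon interval of 100 schedules the switch msecs_to_jiffies(500) from now; strictly the beacon interval is in TUs of 1.024 ms rather than milliseconds, a roughly 2.4% underestimate the code tolerates. A count of 0 or 1 queues the switch work immediately, and the IEEE80211_STA_CSA_RECEIVED flag together with the stopped queues keeps later beacons from re-arming the timer while a switch is already pending.
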
574static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
575 u16 capab_info, u8 *pwr_constr_elem,
576 u8 pwr_constr_elem_len)
577{
578 struct ieee80211_conf *conf = &sdata->local->hw.conf;
579
580 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
581 return;
582
583 /* Power constraint IE length should be 1 octet */
584 if (pwr_constr_elem_len != 1)
585 return;
586
587 if ((*pwr_constr_elem <= conf->channel->max_power) &&
588 (*pwr_constr_elem != sdata->local->power_constr_level)) {
589 sdata->local->power_constr_level = *pwr_constr_elem;
590 ieee80211_hw_config(sdata->local, 0);
591 }
592}
593
594/* powersave */
595static void ieee80211_enable_ps(struct ieee80211_local *local,
596 struct ieee80211_sub_if_data *sdata)
597{
598 struct ieee80211_conf *conf = &local->hw.conf;
599
600 /*
601 * If we are scanning right now then the parameters will
602 * take effect when scan finishes.
603 */
604 if (local->hw_scanning || local->sw_scanning)
605 return;
606
607 if (conf->dynamic_ps_timeout > 0 &&
608 !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
609 mod_timer(&local->dynamic_ps_timer, jiffies +
610 msecs_to_jiffies(conf->dynamic_ps_timeout));
611 } else {
612 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
613 ieee80211_send_nullfunc(local, sdata, 1);
614 conf->flags |= IEEE80211_CONF_PS;
615 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
616 }
617}
618
619static void ieee80211_change_ps(struct ieee80211_local *local)
620{
621 struct ieee80211_conf *conf = &local->hw.conf;
622
623 if (local->ps_sdata) {
624 if (!(local->ps_sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED))
625 return;
626
627 ieee80211_enable_ps(local, local->ps_sdata);
628 } else if (conf->flags & IEEE80211_CONF_PS) {
629 conf->flags &= ~IEEE80211_CONF_PS;
630 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
631 del_timer_sync(&local->dynamic_ps_timer);
632 cancel_work_sync(&local->dynamic_ps_enable_work);
633 }
634}
635
636/* need to hold RTNL or interface lock */
637void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
638{
639 struct ieee80211_sub_if_data *sdata, *found = NULL;
640 int count = 0;
641
642 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
643 local->ps_sdata = NULL;
644 return;
645 }
646
647 list_for_each_entry(sdata, &local->interfaces, list) {
648 if (!netif_running(sdata->dev))
649 continue;
650 if (sdata->vif.type != NL80211_IFTYPE_STATION)
651 continue;
652 found = sdata;
653 count++;
654 }
655
656 if (count == 1 && found->u.mgd.powersave) {
657 s32 beaconint_us;
658
659 if (latency < 0)
660 latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY);
661
662 beaconint_us = ieee80211_tu_to_usec(
663 found->vif.bss_conf.beacon_int);
664
665 if (beaconint_us > latency) {
666 local->ps_sdata = NULL;
667 } else {
668 u8 dtimper = found->vif.bss_conf.dtim_period;
669 int maxslp = 1;
670
671 if (dtimper > 1)
672 maxslp = min_t(int, dtimper,
673 latency / beaconint_us);
674
675 local->hw.conf.max_sleep_period = maxslp;
676 local->ps_sdata = found;
677 }
678 } else {
679 local->ps_sdata = NULL;
680 }
681
682 ieee80211_change_ps(local);
683}
684
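
The units in the calculation above: ieee80211_tu_to_usec() multiplies by 1024, so a beacon interval of 100 TU is 102400 us. With a PM QoS network-latency requirement of, say, 2 s (2000000 us), powersave stays on and max_sleep_period becomes min(dtim_period, 2000000 / 102400), i.e. min(dtim_period, 19), so a DTIM period of 2 lets the hardware sleep across two beacons; if the latency requirement is tighter than one beacon interval, or more than one station interface is running, ps_sdata is cleared and ieee80211_change_ps() drops IEEE80211_CONF_PS again.
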
685void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
686{
687 struct ieee80211_local *local =
688 container_of(work, struct ieee80211_local,
689 dynamic_ps_disable_work);
690
691 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
692 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
693 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
694 }
695
696 ieee80211_wake_queues_by_reason(&local->hw,
697 IEEE80211_QUEUE_STOP_REASON_PS);
698}
699
700void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
701{
702 struct ieee80211_local *local =
703 container_of(work, struct ieee80211_local,
704 dynamic_ps_enable_work);
705 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
706
707 /* can only happen when PS was just disabled anyway */
708 if (!sdata)
709 return;
710
711 if (local->hw.conf.flags & IEEE80211_CONF_PS)
712 return;
713
714 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
715 ieee80211_send_nullfunc(local, sdata, 1);
716
717 local->hw.conf.flags |= IEEE80211_CONF_PS;
718 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
719}
720
721void ieee80211_dynamic_ps_timer(unsigned long data)
722{
723 struct ieee80211_local *local = (void *) data;
724
725 if (local->quiescing)
726 return;
727
728 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
729}
730
362/* MLME */ 731/* MLME */
363static void ieee80211_sta_wmm_params(struct ieee80211_local *local, 732static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
364 struct ieee80211_if_managed *ifmgd, 733 struct ieee80211_if_managed *ifmgd,
@@ -427,38 +796,13 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
427 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, 796 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min,
428 params.cw_max, params.txop); 797 params.cw_max, params.txop);
429#endif 798#endif
430 if (local->ops->conf_tx && 799 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
431 local->ops->conf_tx(local_to_hw(local), queue, &params)) {
432 printk(KERN_DEBUG "%s: failed to set TX queue " 800 printk(KERN_DEBUG "%s: failed to set TX queue "
433 "parameters for queue %d\n", local->mdev->name, queue); 801 "parameters for queue %d\n", local->mdev->name,
434 } 802 queue);
435 } 803 }
436} 804}
437 805
438static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
439{
440 u8 mask;
441 u8 index, indexn1, indexn2;
442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
443
444 if (unlikely(!tim || elems->tim_len < 4))
445 return false;
446
447 aid &= 0x3fff;
448 index = aid / 8;
449 mask = 1 << (aid & 7);
450
451 indexn1 = tim->bitmap_ctrl & 0xfe;
452 indexn2 = elems->tim_len + indexn1 - 4;
453
454 if (index < indexn1 || index > indexn2)
455 return false;
456
457 index -= indexn1;
458
459 return !!(tim->virtual_map[index] & mask);
460}
461
462static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 806static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
463 u16 capab, bool erp_valid, u8 erp) 807 u16 capab, bool erp_valid, u8 erp)
464{ 808{
@@ -610,6 +954,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
610 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 954 sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
611 sdata->vif.bss_conf.dtim_period = bss->dtim_period; 955 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
612 956
957 bss_info_changed |= BSS_CHANGED_BEACON_INT;
613 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 958 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
614 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 959 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
615 960
@@ -632,20 +977,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
632 * changed or not. 977 * changed or not.
633 */ 978 */
634 bss_info_changed |= BSS_CHANGED_BASIC_RATES; 979 bss_info_changed |= BSS_CHANGED_BASIC_RATES;
980
981 /* And the BSSID changed - we're associated now */
982 bss_info_changed |= BSS_CHANGED_BSSID;
983
635 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 984 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
636 985
637 if (local->powersave) { 986 /* will be same as sdata */
638 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) && 987 if (local->ps_sdata) {
639 local->hw.conf.dynamic_ps_timeout > 0) { 988 mutex_lock(&local->iflist_mtx);
640 mod_timer(&local->dynamic_ps_timer, jiffies + 989 ieee80211_recalc_ps(local, -1);
641 msecs_to_jiffies( 990 mutex_unlock(&local->iflist_mtx);
642 local->hw.conf.dynamic_ps_timeout));
643 } else {
644 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
645 ieee80211_send_nullfunc(local, sdata, 1);
646 conf->flags |= IEEE80211_CONF_PS;
647 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
648 }
649 } 991 }
650 992
651 netif_tx_start_all_queues(sdata->dev); 993 netif_tx_start_all_queues(sdata->dev);
@@ -664,7 +1006,8 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
664 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n", 1006 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
665 sdata->dev->name, ifmgd->bssid); 1007 sdata->dev->name, ifmgd->bssid);
666 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1008 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
667 ieee80211_sta_send_apinfo(sdata); 1009 ieee80211_recalc_idle(local);
1010 cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
668 1011
669 /* 1012 /*
670 * Most likely AP is not in the range so remove the 1013 * Most likely AP is not in the range so remove the
@@ -689,8 +1032,6 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
689 1032
690 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; 1033 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
691 1034
692 set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifmgd->request);
693
694 /* Direct probe is sent to broadcast address as some APs 1035 /* Direct probe is sent to broadcast address as some APs
695 * will not answer to direct packet in unassociated state. 1036 * will not answer to direct packet in unassociated state.
696 */ 1037 */
@@ -714,7 +1055,8 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
714 " timed out\n", 1055 " timed out\n",
715 sdata->dev->name, ifmgd->bssid); 1056 sdata->dev->name, ifmgd->bssid);
716 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1057 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
717 ieee80211_sta_send_apinfo(sdata); 1058 ieee80211_recalc_idle(local);
1059 cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
718 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 1060 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
719 sdata->local->hw.conf.channel->center_freq, 1061 sdata->local->hw.conf.channel->center_freq,
720 ifmgd->ssid, ifmgd->ssid_len); 1062 ifmgd->ssid, ifmgd->ssid_len);
@@ -817,9 +1159,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
817 1159
818 rcu_read_unlock(); 1160 rcu_read_unlock();
819 1161
1162 ieee80211_set_wmm_default(sdata);
1163
1164 ieee80211_recalc_idle(local);
1165
820 /* channel(_type) changes are handled by ieee80211_hw_config */ 1166 /* channel(_type) changes are handled by ieee80211_hw_config */
821 local->oper_channel_type = NL80211_CHAN_NO_HT; 1167 local->oper_channel_type = NL80211_CHAN_NO_HT;
822 1168
1169 /* on the next assoc, re-program HT parameters */
1170 sdata->ht_opmode_valid = false;
1171
823 local->power_constr_level = 0; 1172 local->power_constr_level = 0;
824 1173
825 del_timer_sync(&local->dynamic_ps_timer); 1174 del_timer_sync(&local->dynamic_ps_timer);
@@ -831,6 +1180,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
831 } 1180 }
832 1181
833 ieee80211_hw_config(local, config_changed); 1182 ieee80211_hw_config(local, config_changed);
1183
1184 /* And the BSSID changed -- not very interesting here */
1185 changed |= BSS_CHANGED_BSSID;
834 ieee80211_bss_info_change_notify(sdata, changed); 1186 ieee80211_bss_info_change_notify(sdata, changed);
835 1187
836 rcu_read_lock(); 1188 rcu_read_lock();
@@ -897,7 +1249,8 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
897 " timed out\n", 1249 " timed out\n",
898 sdata->dev->name, ifmgd->bssid); 1250 sdata->dev->name, ifmgd->bssid);
899 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1251 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
900 ieee80211_sta_send_apinfo(sdata); 1252 ieee80211_recalc_idle(local);
1253 cfg80211_send_assoc_timeout(sdata->dev, ifmgd->bssid);
901 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 1254 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
902 sdata->local->hw.conf.channel->center_freq, 1255 sdata->local->hw.conf.channel->center_freq,
903 ifmgd->ssid, ifmgd->ssid_len); 1256 ifmgd->ssid, ifmgd->ssid_len);
@@ -917,6 +1270,7 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
917 printk(KERN_DEBUG "%s: mismatch in privacy configuration and " 1270 printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
918 "mixed-cell disabled - abort association\n", sdata->dev->name); 1271 "mixed-cell disabled - abort association\n", sdata->dev->name);
919 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1272 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1273 ieee80211_recalc_idle(local);
920 return; 1274 return;
921 } 1275 }
922 1276
@@ -948,6 +1302,17 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
948 u.mgd.beacon_loss_work); 1302 u.mgd.beacon_loss_work);
949 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1303 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
950 1304
1305 /*
1306 * The driver has already reported this event and we have
1307 * already sent a probe request. Maybe the AP died and the
1308 * driver keeps reporting until we disassociate... We have
1309 * to ignore that because otherwise we would continually
1310 * reset the timer and never check whether we received a
1311 * probe response!
1312 */
1313 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
1314 return;
1315
951#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1316#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
952 if (net_ratelimit()) { 1317 if (net_ratelimit()) {
953 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " 1318 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
@@ -960,7 +1325,7 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
960 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1325 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
961 ifmgd->ssid_len, NULL, 0); 1326 ifmgd->ssid_len, NULL, 0);
962 1327
963 mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL); 1328 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
964} 1329}
965 1330
966void ieee80211_beacon_loss(struct ieee80211_vif *vif) 1331void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -997,7 +1362,7 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
997 } 1362 }
998 1363
999 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && 1364 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
1000 time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { 1365 time_after(jiffies, sta->last_rx + IEEE80211_PROBE_WAIT)) {
1001 printk(KERN_DEBUG "%s: no probe response from AP %pM " 1366 printk(KERN_DEBUG "%s: no probe response from AP %pM "
1002 "- disassociating\n", 1367 "- disassociating\n",
1003 sdata->dev->name, ifmgd->bssid); 1368 sdata->dev->name, ifmgd->bssid);
@@ -1024,8 +1389,8 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1024 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1389 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1025 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1390 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1026 ifmgd->ssid_len, NULL, 0); 1391 ifmgd->ssid_len, NULL, 0);
1392 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
1027 goto unlock; 1393 goto unlock;
1028
1029 } 1394 }
1030 1395
1031 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { 1396 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) {
@@ -1034,15 +1399,16 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1034 ifmgd->ssid_len, NULL, 0); 1399 ifmgd->ssid_len, NULL, 0);
1035 } 1400 }
1036 1401
1402 if (!disassoc)
1403 mod_timer(&ifmgd->timer,
1404 jiffies + IEEE80211_MONITORING_INTERVAL);
1405
1037 unlock: 1406 unlock:
1038 rcu_read_unlock(); 1407 rcu_read_unlock();
1039 1408
1040 if (disassoc) 1409 if (disassoc)
1041 ieee80211_set_disassoc(sdata, true, true, 1410 ieee80211_set_disassoc(sdata, true, true,
1042 WLAN_REASON_PREV_AUTH_NOT_VALID); 1411 WLAN_REASON_PREV_AUTH_NOT_VALID);
1043 else
1044 mod_timer(&ifmgd->timer, jiffies +
1045 IEEE80211_MONITORING_INTERVAL);
1046} 1412}
1047 1413
1048 1414
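Taken together, the hunks above turn connection monitoring into a simple poll cycle: a driver beacon-loss report or a quiet AP triggers a directed probe request, IEEE80211_STA_PROBEREQ_POLL marks that a reply is outstanding (repeated driver reports are ignored in the meantime), the timer is re-armed with the shorter IEEE80211_PROBE_WAIT, and a timeout with the flag still set leads to disassociation. A minimal user-space sketch of that cycle follows; the names, types and timeouts are invented for illustration and are not the kernel code.

/* Illustrative sketch of the probe-poll monitoring cycle added above.
 * Names, types and timeouts are invented; only the control flow mirrors
 * the mac80211 change. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MONITORING_INTERVAL 2   /* seconds between routine checks        */
#define PROBE_WAIT          1   /* seconds to wait for a probe response  */

struct sta_state {
    bool probereq_poll;         /* a directed probe is outstanding        */
    time_t last_rx;             /* last time we heard from the AP         */
    time_t next_check;          /* when the monitor timer fires next      */
};

static void send_probe_req(void) { puts("-> probe request"); }

/* driver-reported beacon loss: poll once, ignore repeats while polling */
void beacon_loss(struct sta_state *s, time_t now)
{
    if (s->probereq_poll)
        return;                 /* already waiting for a response */
    s->probereq_poll = true;
    send_probe_req();
    s->next_check = now + PROBE_WAIT;
}

/* a beacon or probe response from the AP clears the poll state */
void frame_from_ap(struct sta_state *s, time_t now)
{
    s->last_rx = now;
    s->probereq_poll = false;
}

/* periodic monitor; returns true when the association should be dropped */
bool monitor(struct sta_state *s, time_t now)
{
    if (s->probereq_poll && now > s->last_rx + PROBE_WAIT)
        return true;            /* poll timed out: disassociate */

    if (now > s->last_rx + MONITORING_INTERVAL) {
        s->probereq_poll = true;    /* AP went quiet: poll it */
        send_probe_req();
        s->next_check = now + PROBE_WAIT;
        return false;
    }

    s->next_check = now + MONITORING_INTERVAL;
    return false;
}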
@@ -1055,6 +1421,7 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
1055 if (ifmgd->flags & IEEE80211_STA_EXT_SME) { 1421 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1056 /* Wait for SME to request association */ 1422 /* Wait for SME to request association */
1057 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 1423 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1424 ieee80211_recalc_idle(sdata->local);
1058 } else 1425 } else
1059 ieee80211_associate(sdata); 1426 ieee80211_associate(sdata);
1060} 1427}
@@ -1187,7 +1554,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1187 1554
1188 ieee80211_set_disassoc(sdata, true, false, 0); 1555 ieee80211_set_disassoc(sdata, true, false, 0);
1189 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; 1556 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
1190 cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len); 1557 cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, len);
1191} 1558}
1192 1559
1193 1560
@@ -1218,7 +1585,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1218 } 1585 }
1219 1586
1220 ieee80211_set_disassoc(sdata, false, false, reason_code); 1587 ieee80211_set_disassoc(sdata, false, false, reason_code);
1221 cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len); 1588 cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, len);
1222} 1589}
1223 1590
1224 1591
@@ -1287,6 +1654,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1287 * association next time. This works around some broken APs 1654 * association next time. This works around some broken APs
1288 * which do not correctly reject reassociation requests. */ 1655 * which do not correctly reject reassociation requests. */
1289 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; 1656 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1657 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
1658 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1659 /* Wait for SME to decide what to do next */
1660 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1661 ieee80211_recalc_idle(local);
1662 }
1290 return; 1663 return;
1291 } 1664 }
1292 1665
@@ -1340,8 +1713,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1340 * to between the sta_info_alloc() and sta_info_insert() above. 1713 * to between the sta_info_alloc() and sta_info_insert() above.
1341 */ 1714 */
1342 1715
1343 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | 1716 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP);
1344 WLAN_STA_AUTHORIZED); 1717 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1718 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1345 1719
1346 rates = 0; 1720 rates = 0;
1347 basic_rates = 0; 1721 basic_rates = 0;
@@ -1421,6 +1795,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1421 if (elems.wmm_param) 1795 if (elems.wmm_param)
1422 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1796 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1423 elems.wmm_param_len); 1797 elems.wmm_param_len);
1798 else
1799 ieee80211_set_wmm_default(sdata);
1424 1800
1425 if (elems.ht_info_elem && elems.wmm_param && 1801 if (elems.ht_info_elem && elems.wmm_param &&
1426 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1802 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
@@ -1476,7 +1852,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1476 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) { 1852 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) {
1477 struct ieee80211_channel_sw_ie *sw_elem = 1853 struct ieee80211_channel_sw_ie *sw_elem =
1478 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1854 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
1479 ieee80211_process_chanswitch(sdata, sw_elem, bss); 1855 ieee80211_sta_process_chanswitch(sdata, sw_elem, bss);
1480 } 1856 }
1481 1857
1482 ieee80211_rx_bss_put(local, bss); 1858 ieee80211_rx_bss_put(local, bss);
@@ -1507,8 +1883,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1507 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1883 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1508 1884
1509 /* direct probe may be part of the association flow */ 1885 /* direct probe may be part of the association flow */
1510 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, 1886 if (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE) {
1511 &ifmgd->request)) {
1512 printk(KERN_DEBUG "%s direct probe responded\n", 1887 printk(KERN_DEBUG "%s direct probe responded\n",
1513 sdata->dev->name); 1888 sdata->dev->name);
1514 ieee80211_authenticate(sdata); 1889 ieee80211_authenticate(sdata);
@@ -1518,46 +1893,81 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1518 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; 1893 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1519} 1894}
1520 1895
1896/*
1897 * This is the canonical list of information elements we care about,
1898 * the filter code also gives us all changes to the Microsoft OUI
1899 * (00:50:F2) vendor IE which is used for WMM which we need to track.
1900 *
1901 * We implement beacon filtering in software since that means we can
1902 * avoid processing the frame here and in cfg80211, and userspace
1903 * will not be able to tell whether the hardware supports it or not.
1904 *
1905 * XXX: This list needs to be dynamic -- userspace needs to be able to
1906 * add items it requires. It also needs to be able to tell us to
1907 * look out for other vendor IEs.
1908 */
1909static const u64 care_about_ies =
1910 (1ULL << WLAN_EID_COUNTRY) |
1911 (1ULL << WLAN_EID_ERP_INFO) |
1912 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
1913 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
1914 (1ULL << WLAN_EID_HT_CAPABILITY) |
1915 (1ULL << WLAN_EID_HT_INFORMATION);
1916
1521static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 1917static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1522 struct ieee80211_mgmt *mgmt, 1918 struct ieee80211_mgmt *mgmt,
1523 size_t len, 1919 size_t len,
1524 struct ieee80211_rx_status *rx_status) 1920 struct ieee80211_rx_status *rx_status)
1525{ 1921{
1526 struct ieee80211_if_managed *ifmgd; 1922 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1527 size_t baselen; 1923 size_t baselen;
1528 struct ieee802_11_elems elems; 1924 struct ieee802_11_elems elems;
1529 struct ieee80211_local *local = sdata->local; 1925 struct ieee80211_local *local = sdata->local;
1530 u32 changed = 0; 1926 u32 changed = 0;
1531 bool erp_valid, directed_tim; 1927 bool erp_valid, directed_tim = false;
1532 u8 erp_value = 0; 1928 u8 erp_value = 0;
1929 u32 ncrc;
1533 1930
1534 /* Process beacon from the current BSS */ 1931 /* Process beacon from the current BSS */
1535 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 1932 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
1536 if (baselen > len) 1933 if (baselen > len)
1537 return; 1934 return;
1538 1935
1539 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 1936 if (rx_status->freq != local->hw.conf.channel->center_freq)
1540
1541 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
1542
1543 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1544 return; 1937 return;
1545 1938
1546 ifmgd = &sdata->u.mgd;
1547
1548 if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED) || 1939 if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED) ||
1549 memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0) 1940 memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0)
1550 return; 1941 return;
1551 1942
1552 if (rx_status->freq != local->hw.conf.channel->center_freq) 1943 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
1553 return; 1944#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1945 if (net_ratelimit()) {
1946 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1947 "to a received beacon\n", sdata->dev->name);
1948 }
1949#endif
1950 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1951 }
1554 1952
1555 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1953 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
1556 elems.wmm_param_len); 1954 ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
1955 len - baselen, &elems,
1956 care_about_ies, ncrc);
1557 1957
1558 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { 1958 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
1559 directed_tim = ieee80211_check_tim(&elems, ifmgd->aid); 1959 directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
1960 ifmgd->aid);
1560 1961
1962 if (ncrc != ifmgd->beacon_crc) {
1963 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
1964 true);
1965
1966 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1967 elems.wmm_param_len);
1968 }
1969
1970 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
1561 if (directed_tim) { 1971 if (directed_tim) {
1562 if (local->hw.conf.dynamic_ps_timeout > 0) { 1972 if (local->hw.conf.dynamic_ps_timeout > 0) {
1563 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 1973 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
@@ -1580,6 +1990,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1580 } 1990 }
1581 } 1991 }
1582 1992
1993 if (ncrc == ifmgd->beacon_crc)
1994 return;
1995 ifmgd->beacon_crc = ncrc;
1996
1583 if (elems.erp_info && elems.erp_info_len >= 1) { 1997 if (elems.erp_info && elems.erp_info_len >= 1) {
1584 erp_valid = true; 1998 erp_valid = true;
1585 erp_value = elems.erp_info[0]; 1999 erp_value = elems.erp_info[0];
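The rewritten beacon handler above avoids re-processing beacons that did not change anything we track: a CRC is computed over the fixed fields (crc32_be over beacon interval and capabilities) plus only the elements listed in care_about_ies (via ieee802_11_parse_elems_crc), and the expensive per-beacon work runs only when that value differs from the stored ifmgd->beacon_crc. Below is a rough, self-contained sketch of the same idea; it uses a simple FNV-1a hash instead of CRC-32 and invented types, so it is illustrative only.

/* Sketch of "skip beacon processing unless tracked elements changed". */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct element {
    uint8_t id;
    uint8_t len;
    const uint8_t *data;
};

/* FNV-1a stands in for crc32_be(); only the comparison matters here */
static uint32_t fnv1a(uint32_t h, const uint8_t *p, size_t n)
{
    while (n--) {
        h ^= *p++;
        h *= 16777619u;
    }
    return h;
}

/* hash the fixed fields plus only the elements whose IDs are set in
 * care_mask (element IDs 0..63, one bit each) */
static uint32_t hash_beacon(const uint8_t fixed[4],
                            const struct element *elems, size_t n,
                            uint64_t care_mask)
{
    uint32_t h = fnv1a(2166136261u, fixed, 4);

    for (size_t i = 0; i < n; i++) {
        if (elems[i].id >= 64 || !(care_mask & (1ULL << elems[i].id)))
            continue;                       /* element not tracked */
        h = fnv1a(h, &elems[i].id, 1);
        h = fnv1a(h, &elems[i].len, 1);
        h = fnv1a(h, elems[i].data, elems[i].len);
    }
    return h;
}

/* returns true when the expensive per-beacon work should run */
bool beacon_needs_processing(uint32_t *stored_hash, const uint8_t fixed[4],
                             const struct element *elems, size_t n,
                             uint64_t care_mask)
{
    uint32_t h = hash_beacon(fixed, elems, n, care_mask);

    if (h == *stored_hash)
        return false;           /* nothing we track changed: cheap exit */
    *stored_hash = h;
    return true;
}

Elements that change with almost every beacon (such as the TIM) are deliberately left out of the mask; hashing them would make the value differ every time and defeat the filter.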
@@ -1714,6 +2128,11 @@ static void ieee80211_sta_timer(unsigned long data)
1714 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2128 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1715 struct ieee80211_local *local = sdata->local; 2129 struct ieee80211_local *local = sdata->local;
1716 2130
2131 if (local->quiescing) {
2132 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
2133 return;
2134 }
2135
1717 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); 2136 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
1718 queue_work(local->hw.workqueue, &ifmgd->work); 2137 queue_work(local->hw.workqueue, &ifmgd->work);
1719} 2138}
@@ -1723,10 +2142,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
1723 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2142 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1724 struct ieee80211_local *local = sdata->local; 2143 struct ieee80211_local *local = sdata->local;
1725 2144
1726 if (local->ops->reset_tsf) { 2145 /* Reset own TSF to allow time synchronization work. */
1727 /* Reset own TSF to allow time synchronization work. */ 2146 drv_reset_tsf(local);
1728 local->ops->reset_tsf(local_to_hw(local));
1729 }
1730 2147
1731 ifmgd->wmm_last_param_set = -1; /* allow any WMM update */ 2148 ifmgd->wmm_last_param_set = -1; /* allow any WMM update */
1732 2149
@@ -1814,25 +2231,18 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
1814 return 0; 2231 return 0;
1815 } else { 2232 } else {
1816 if (ifmgd->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) { 2233 if (ifmgd->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
2234
1817 ifmgd->assoc_scan_tries++; 2235 ifmgd->assoc_scan_tries++;
1818 /* XXX maybe racy? */
1819 if (local->scan_req)
1820 return -1;
1821 memcpy(local->int_scan_req.ssids[0].ssid,
1822 ifmgd->ssid, IEEE80211_MAX_SSID_LEN);
1823 if (ifmgd->flags & IEEE80211_STA_AUTO_SSID_SEL)
1824 local->int_scan_req.ssids[0].ssid_len = 0;
1825 else
1826 local->int_scan_req.ssids[0].ssid_len = ifmgd->ssid_len;
1827 2236
1828 if (ieee80211_start_scan(sdata, &local->int_scan_req)) 2237 ieee80211_request_internal_scan(sdata, ifmgd->ssid,
1829 ieee80211_scan_failed(local); 2238 ssid_len);
1830 2239
1831 ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE; 2240 ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE;
1832 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); 2241 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
1833 } else { 2242 } else {
1834 ifmgd->assoc_scan_tries = 0; 2243 ifmgd->assoc_scan_tries = 0;
1835 ifmgd->state = IEEE80211_STA_MLME_DISABLED; 2244 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
2245 ieee80211_recalc_idle(local);
1836 } 2246 }
1837 } 2247 }
1838 return -1; 2248 return -1;
@@ -1855,6 +2265,17 @@ static void ieee80211_sta_work(struct work_struct *work)
1855 2265
1856 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 2266 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1857 return; 2267 return;
2268
2269 /*
2270 * Nothing should have been stuffed into the workqueue during
2271 * the suspend->resume cycle. If this WARN is seen then there
2272 * is a bug with either the driver suspend or something in
2273 * mac80211 stuffing into the workqueue which we haven't yet
2274 * cleared during mac80211's suspend cycle.
2275 */
2276 if (WARN_ON(local->suspended))
2277 return;
2278
1858 ifmgd = &sdata->u.mgd; 2279 ifmgd = &sdata->u.mgd;
1859 2280
1860 while ((skb = skb_dequeue(&ifmgd->skb_queue))) 2281 while ((skb = skb_dequeue(&ifmgd->skb_queue)))
@@ -1864,14 +2285,8 @@ static void ieee80211_sta_work(struct work_struct *work)
1864 ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE && 2285 ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE &&
1865 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE && 2286 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE &&
1866 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) { 2287 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) {
1867 /* 2288 queue_delayed_work(local->hw.workqueue, &local->scan_work,
1868 * The call to ieee80211_start_scan can fail but ieee80211_request_scan 2289 round_jiffies_relative(0));
1869 * (which queued ieee80211_sta_work) did not return an error. Thus, call
1870 * ieee80211_scan_failed here if ieee80211_start_scan fails in order to
1871 * notify the scan requester.
1872 */
1873 if (ieee80211_start_scan(sdata, local->scan_req))
1874 ieee80211_scan_failed(local);
1875 return; 2290 return;
1876 } 2291 }
1877 2292
@@ -1882,6 +2297,8 @@ static void ieee80211_sta_work(struct work_struct *work)
1882 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request)) 2297 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request))
1883 return; 2298 return;
1884 2299
2300 ieee80211_recalc_idle(local);
2301
1885 switch (ifmgd->state) { 2302 switch (ifmgd->state) {
1886 case IEEE80211_STA_MLME_DISABLED: 2303 case IEEE80211_STA_MLME_DISABLED:
1887 break; 2304 break;
@@ -1926,10 +2343,43 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
1926 } 2343 }
1927} 2344}
1928 2345
2346#ifdef CONFIG_PM
2347void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2348{
2349 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2350
2351 /*
2352 * we need to use atomic bitops for the running bits
2353 * only because both timers might fire at the same
2354 * time -- the code here is properly synchronised.
2355 */
2356
2357 cancel_work_sync(&ifmgd->work);
2358 cancel_work_sync(&ifmgd->beacon_loss_work);
2359 if (del_timer_sync(&ifmgd->timer))
2360 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
2361
2362 cancel_work_sync(&ifmgd->chswitch_work);
2363 if (del_timer_sync(&ifmgd->chswitch_timer))
2364 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
2365}
2366
2367void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2368{
2369 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2370
2371 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
2372 add_timer(&ifmgd->timer);
2373 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
2374 add_timer(&ifmgd->chswitch_timer);
2375}
2376#endif
2377
1929/* interface setup */ 2378/* interface setup */
1930void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) 2379void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1931{ 2380{
1932 struct ieee80211_if_managed *ifmgd; 2381 struct ieee80211_if_managed *ifmgd;
2382 u32 hw_flags;
1933 2383
1934 ifmgd = &sdata->u.mgd; 2384 ifmgd = &sdata->u.mgd;
1935 INIT_WORK(&ifmgd->work, ieee80211_sta_work); 2385 INIT_WORK(&ifmgd->work, ieee80211_sta_work);
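ieee80211_sta_quiesce() and ieee80211_sta_restart() above stop the MLME timers for suspend while recording, in the atomic TMR_RUNNING_* bits, which ones were still pending so that resume re-arms exactly those; the timer handlers cooperate by only setting their bit when local->quiescing is set (see the ieee80211_sta_timer hunk earlier) instead of queueing work. A compact sketch of that bookkeeping pattern using C11 atomics follows; the structure and names are illustrative, not the kernel's timer API.

/* Sketch: remember which timers were pending across a suspend/resume. */
#include <stdatomic.h>
#include <stdbool.h>

enum { TMR_MAIN, TMR_CHANSW };

struct mlme {
    atomic_uint timers_running;   /* one bit per timer to re-arm on resume */
    atomic_bool quiescing;
    bool main_timer_armed;        /* stands in for the real timer objects  */
    bool chansw_timer_armed;
};

/* timer callback: while quiescing, just note that we wanted to run */
void main_timer_fired(struct mlme *m)
{
    if (atomic_load(&m->quiescing)) {
        atomic_fetch_or(&m->timers_running, 1u << TMR_MAIN);
        return;
    }
    /* ... normal work would be queued here ... */
}

void quiesce(struct mlme *m)
{
    atomic_store(&m->quiescing, true);
    /* del_timer_sync() analogue: record timers that were still pending */
    if (m->main_timer_armed) {
        m->main_timer_armed = false;
        atomic_fetch_or(&m->timers_running, 1u << TMR_MAIN);
    }
    if (m->chansw_timer_armed) {
        m->chansw_timer_armed = false;
        atomic_fetch_or(&m->timers_running, 1u << TMR_CHANSW);
    }
}

void restart(struct mlme *m)
{
    unsigned int pending = atomic_exchange(&m->timers_running, 0);

    atomic_store(&m->quiescing, false);
    if (pending & (1u << TMR_MAIN))
        m->main_timer_armed = true;     /* add_timer() analogue */
    if (pending & (1u << TMR_CHANSW))
        m->chansw_timer_armed = true;
}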
@@ -1949,6 +2399,13 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1949 IEEE80211_STA_AUTO_CHANNEL_SEL; 2399 IEEE80211_STA_AUTO_CHANNEL_SEL;
1950 if (sdata->local->hw.queues >= 4) 2400 if (sdata->local->hw.queues >= 4)
1951 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; 2401 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
2402
2403 hw_flags = sdata->local->hw.flags;
2404
2405 if (hw_flags & IEEE80211_HW_SUPPORTS_PS) {
2406 ifmgd->powersave = CONFIG_MAC80211_DEFAULT_PS_VALUE;
2407 sdata->local->hw.conf.dynamic_ps_timeout = 500;
2408 }
1952} 2409}
1953 2410
1954/* configuration hooks */ 2411/* configuration hooks */
@@ -2032,13 +2489,6 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
2032 ifmgd->flags &= ~IEEE80211_STA_BSSID_SET; 2489 ifmgd->flags &= ~IEEE80211_STA_BSSID_SET;
2033 } 2490 }
2034 2491
2035 if (netif_running(sdata->dev)) {
2036 if (ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID)) {
2037 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
2038 "the low-level driver\n", sdata->dev->name);
2039 }
2040 }
2041
2042 return ieee80211_sta_commit(sdata); 2492 return ieee80211_sta_commit(sdata);
2043} 2493}
2044 2494
@@ -2047,6 +2497,13 @@ int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
2047{ 2497{
2048 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2049 2499
2500 if (len == 0 && ifmgd->extra_ie_len == 0)
2501 return -EALREADY;
2502
2503 if (len == ifmgd->extra_ie_len && ifmgd->extra_ie &&
2504 memcmp(ifmgd->extra_ie, ie, len) == 0)
2505 return -EALREADY;
2506
2050 kfree(ifmgd->extra_ie); 2507 kfree(ifmgd->extra_ie);
2051 if (len == 0) { 2508 if (len == 0) {
2052 ifmgd->extra_ie = NULL; 2509 ifmgd->extra_ie = NULL;
@@ -2068,9 +2525,6 @@ int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason
2068 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", 2525 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
2069 sdata->dev->name, reason); 2526 sdata->dev->name, reason);
2070 2527
2071 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2072 return -EINVAL;
2073
2074 ieee80211_set_disassoc(sdata, true, true, reason); 2528 ieee80211_set_disassoc(sdata, true, true, reason);
2075 return 0; 2529 return 0;
2076} 2530}
@@ -2082,9 +2536,6 @@ int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
2082 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", 2536 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
2083 sdata->dev->name, reason); 2537 sdata->dev->name, reason);
2084 2538
2085 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2086 return -EINVAL;
2087
2088 if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED)) 2539 if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED))
2089 return -ENOLINK; 2540 return -ENOLINK;
2090 2541
@@ -2104,75 +2555,17 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
2104 rcu_read_unlock(); 2555 rcu_read_unlock();
2105} 2556}
2106 2557
2107void ieee80211_dynamic_ps_disable_work(struct work_struct *work) 2558int ieee80211_max_network_latency(struct notifier_block *nb,
2559 unsigned long data, void *dummy)
2108{ 2560{
2561 s32 latency_usec = (s32) data;
2109 struct ieee80211_local *local = 2562 struct ieee80211_local *local =
2110 container_of(work, struct ieee80211_local, 2563 container_of(nb, struct ieee80211_local,
2111 dynamic_ps_disable_work); 2564 network_latency_notifier);
2112
2113 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
2114 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
2115 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
2116 }
2117 2565
2118 ieee80211_wake_queues_by_reason(&local->hw, 2566 mutex_lock(&local->iflist_mtx);
2119 IEEE80211_QUEUE_STOP_REASON_PS); 2567 ieee80211_recalc_ps(local, latency_usec);
2120} 2568 mutex_unlock(&local->iflist_mtx);
2121
2122void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
2123{
2124 struct ieee80211_local *local =
2125 container_of(work, struct ieee80211_local,
2126 dynamic_ps_enable_work);
2127 /* XXX: using scan_sdata is completely broken! */
2128 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2129
2130 if (local->hw.conf.flags & IEEE80211_CONF_PS)
2131 return;
2132 2569
2133 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata) 2570 return 0;
2134 ieee80211_send_nullfunc(local, sdata, 1);
2135
2136 local->hw.conf.flags |= IEEE80211_CONF_PS;
2137 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
2138}
2139
2140void ieee80211_dynamic_ps_timer(unsigned long data)
2141{
2142 struct ieee80211_local *local = (void *) data;
2143
2144 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
2145}
2146
2147void ieee80211_send_nullfunc(struct ieee80211_local *local,
2148 struct ieee80211_sub_if_data *sdata,
2149 int powersave)
2150{
2151 struct sk_buff *skb;
2152 struct ieee80211_hdr *nullfunc;
2153 __le16 fc;
2154
2155 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
2156 return;
2157
2158 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
2159 if (!skb) {
2160 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2161 "frame\n", sdata->dev->name);
2162 return;
2163 }
2164 skb_reserve(skb, local->hw.extra_tx_headroom);
2165
2166 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
2167 memset(nullfunc, 0, 24);
2168 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
2169 IEEE80211_FCTL_TODS);
2170 if (powersave)
2171 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
2172 nullfunc->frame_control = fc;
2173 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
2174 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
2175 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
2176
2177 ieee80211_tx_skb(sdata, skb, 0);
2178} 2571}
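ieee80211_max_network_latency() above is a PM-QoS notifier: whenever the allowed network latency changes, the new value is handed to ieee80211_recalc_ps(), which decides (elsewhere in this series) whether powersave may be used at all. As a purely illustrative example of the kind of check such a decision can involve -- this is an assumption for the sketch, not the actual mac80211 policy -- a station could refuse powersave when sleeping until the next DTIM beacon would already exceed the latency budget:

/* Hypothetical latency check; the real policy lives in ieee80211_recalc_ps(). */
#include <stdbool.h>
#include <stdint.h>

/*
 * beacon_int_tu: beacon interval in time units (1 TU = 1024 us)
 * dtim_period:   beacons between DTIMs
 * latency_us:    maximum tolerated latency; negative means unconstrained
 *                (in this sketch only)
 */
bool powersave_allowed(uint16_t beacon_int_tu, uint8_t dtim_period,
                       int32_t latency_us)
{
    int64_t sleep_us = (int64_t)beacon_int_tu * 1024 * dtim_period;

    if (latency_us < 0)
        return true;                 /* no latency constraint registered */
    return sleep_us <= latency_us;   /* only sleep if we can wake in time */
}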
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 81985d27cbda..7a549f9deb96 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -2,6 +2,8 @@
2#include <net/rtnetlink.h> 2#include <net/rtnetlink.h>
3 3
4#include "ieee80211_i.h" 4#include "ieee80211_i.h"
5#include "mesh.h"
6#include "driver-ops.h"
5#include "led.h" 7#include "led.h"
6 8
7int __ieee80211_suspend(struct ieee80211_hw *hw) 9int __ieee80211_suspend(struct ieee80211_hw *hw)
@@ -12,11 +14,30 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
12 struct sta_info *sta; 14 struct sta_info *sta;
13 unsigned long flags; 15 unsigned long flags;
14 16
17 ieee80211_scan_cancel(local);
18
15 ieee80211_stop_queues_by_reason(hw, 19 ieee80211_stop_queues_by_reason(hw,
16 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 20 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
17 21
22 /* flush out all packets */
23 synchronize_net();
24
25 local->quiescing = true;
26 /* make quiescing visible to timers everywhere */
27 mb();
28
18 flush_workqueue(local->hw.workqueue); 29 flush_workqueue(local->hw.workqueue);
19 30
31 /* Don't try to run timers while suspended. */
32 del_timer_sync(&local->sta_cleanup);
33
34 /*
35 * Note that this particular timer doesn't need to be
36 * restarted at resume.
37 */
38 cancel_work_sync(&local->dynamic_ps_enable_work);
39 del_timer_sync(&local->dynamic_ps_timer);
40
20 /* disable keys */ 41 /* disable keys */
21 list_for_each_entry(sdata, &local->interfaces, list) 42 list_for_each_entry(sdata, &local->interfaces, list)
22 ieee80211_disable_keys(sdata); 43 ieee80211_disable_keys(sdata);
@@ -34,157 +55,70 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
34 55
35 rcu_read_unlock(); 56 rcu_read_unlock();
36 57
37 /* remove STAs */
38 if (local->ops->sta_notify) {
39 spin_lock_irqsave(&local->sta_lock, flags);
40 list_for_each_entry(sta, &local->sta_list, list) {
41 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
42 sdata = container_of(sdata->bss,
43 struct ieee80211_sub_if_data,
44 u.ap);
45
46 local->ops->sta_notify(hw, &sdata->vif,
47 STA_NOTIFY_REMOVE, &sta->sta);
48 }
49 spin_unlock_irqrestore(&local->sta_lock, flags);
50 }
51
52 /* remove all interfaces */
53 list_for_each_entry(sdata, &local->interfaces, list) {
54 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
55 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
56 netif_running(sdata->dev)) {
57 conf.vif = &sdata->vif;
58 conf.type = sdata->vif.type;
59 conf.mac_addr = sdata->dev->dev_addr;
60 local->ops->remove_interface(hw, &conf);
61 }
62 }
63
64 /* flush again, in case driver queued work */ 58 /* flush again, in case driver queued work */
65 flush_workqueue(local->hw.workqueue); 59 flush_workqueue(local->hw.workqueue);
66 60
67 /* stop hardware */ 61 /* stop hardware - this must stop RX */
68 if (local->open_count) { 62 if (local->open_count) {
69 ieee80211_led_radio(local, false); 63 ieee80211_led_radio(local, false);
70 local->ops->stop(hw); 64 drv_stop(local);
71 }
72 return 0;
73}
74
75int __ieee80211_resume(struct ieee80211_hw *hw)
76{
77 struct ieee80211_local *local = hw_to_local(hw);
78 struct ieee80211_sub_if_data *sdata;
79 struct ieee80211_if_init_conf conf;
80 struct sta_info *sta;
81 unsigned long flags;
82 int res;
83
84 /* restart hardware */
85 if (local->open_count) {
86 res = local->ops->start(hw);
87
88 ieee80211_led_radio(local, hw->conf.radio_enabled);
89 } 65 }
90 66
91 /* add interfaces */ 67 /* remove STAs */
92 list_for_each_entry(sdata, &local->interfaces, list) { 68 spin_lock_irqsave(&local->sta_lock, flags);
93 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 69 list_for_each_entry(sta, &local->sta_list, list) {
94 sdata->vif.type != NL80211_IFTYPE_MONITOR && 70 if (local->ops->sta_notify) {
95 netif_running(sdata->dev)) { 71 sdata = sta->sdata;
96 conf.vif = &sdata->vif;
97 conf.type = sdata->vif.type;
98 conf.mac_addr = sdata->dev->dev_addr;
99 res = local->ops->add_interface(hw, &conf);
100 }
101 }
102
103 /* add STAs back */
104 if (local->ops->sta_notify) {
105 spin_lock_irqsave(&local->sta_lock, flags);
106 list_for_each_entry(sta, &local->sta_list, list) {
107 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 72 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
108 sdata = container_of(sdata->bss, 73 sdata = container_of(sdata->bss,
109 struct ieee80211_sub_if_data, 74 struct ieee80211_sub_if_data,
110 u.ap); 75 u.ap);
111 76
112 local->ops->sta_notify(hw, &sdata->vif, 77 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
113 STA_NOTIFY_ADD, &sta->sta); 78 &sta->sta);
114 } 79 }
115 spin_unlock_irqrestore(&local->sta_lock, flags);
116 }
117
118 /* Clear Suspend state so that ADDBA requests can be processed */
119
120 rcu_read_lock();
121 80
122 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 81 mesh_plink_quiesce(sta);
123 list_for_each_entry_rcu(sta, &local->sta_list, list) {
124 clear_sta_flags(sta, WLAN_STA_SUSPEND);
125 }
126 } 82 }
83 spin_unlock_irqrestore(&local->sta_lock, flags);
127 84
128 rcu_read_unlock(); 85 /* remove all interfaces */
129
130 /* add back keys */
131 list_for_each_entry(sdata, &local->interfaces, list)
132 if (netif_running(sdata->dev))
133 ieee80211_enable_keys(sdata);
134
135 /* setup RTS threshold */
136 if (local->ops->set_rts_threshold)
137 local->ops->set_rts_threshold(hw, local->rts_threshold);
138
139 /* reconfigure hardware */
140 ieee80211_hw_config(local, ~0);
141
142 netif_addr_lock_bh(local->mdev);
143 ieee80211_configure_filter(local);
144 netif_addr_unlock_bh(local->mdev);
145
146 /* Finally also reconfigure all the BSS information */
147 list_for_each_entry(sdata, &local->interfaces, list) { 86 list_for_each_entry(sdata, &local->interfaces, list) {
148 u32 changed = ~0; 87 switch(sdata->vif.type) {
149 if (!netif_running(sdata->dev))
150 continue;
151 switch (sdata->vif.type) {
152 case NL80211_IFTYPE_STATION: 88 case NL80211_IFTYPE_STATION:
153 /* disable beacon change bits */ 89 ieee80211_sta_quiesce(sdata);
154 changed &= ~IEEE80211_IFCC_BEACON; 90 break;
155 /* fall through */
156 case NL80211_IFTYPE_ADHOC: 91 case NL80211_IFTYPE_ADHOC:
157 case NL80211_IFTYPE_AP: 92 ieee80211_ibss_quiesce(sdata);
158 case NL80211_IFTYPE_MESH_POINT:
159 /*
160 * Driver's config_interface can fail if rfkill is
161 * enabled. Accommodate this return code.
162 * FIXME: When mac80211 has knowledge of rfkill
163 * state the code below can change back to:
164 * WARN(ieee80211_if_config(sdata, changed));
165 * ieee80211_bss_info_change_notify(sdata, ~0);
166 */
167 if (ieee80211_if_config(sdata, changed))
168 printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
169 sdata->dev->name);
170 else
171 ieee80211_bss_info_change_notify(sdata, ~0);
172 break; 93 break;
173 case NL80211_IFTYPE_WDS: 94 case NL80211_IFTYPE_MESH_POINT:
95 ieee80211_mesh_quiesce(sdata);
174 break; 96 break;
175 case NL80211_IFTYPE_AP_VLAN: 97 case NL80211_IFTYPE_AP_VLAN:
176 case NL80211_IFTYPE_MONITOR: 98 case NL80211_IFTYPE_MONITOR:
177 /* ignore virtual */ 99 /* don't tell driver about this */
178 break; 100 continue;
179 case NL80211_IFTYPE_UNSPECIFIED: 101 default:
180 case __NL80211_IFTYPE_AFTER_LAST:
181 WARN_ON(1);
182 break; 102 break;
183 } 103 }
104
105 if (!netif_running(sdata->dev))
106 continue;
107
108 conf.vif = &sdata->vif;
109 conf.type = sdata->vif.type;
110 conf.mac_addr = sdata->dev->dev_addr;
111 drv_remove_interface(local, &conf);
184 } 112 }
185 113
186 ieee80211_wake_queues_by_reason(hw, 114 local->suspended = true;
187 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 115 local->quiescing = false;
188 116
189 return 0; 117 return 0;
190} 118}
119
120/*
121 * __ieee80211_resume() is a static inline which just calls
122 * ieee80211_reconfig(), which is also needed for hardware
123 * hang/firmware failure/etc. recovery.
124 */
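The reworked __ieee80211_suspend() above is all about ordering: cancel scanning, stop the TX queues and let in-flight packets drain, make the quiescing flag visible so timers only record that they fired, flush deferred work and delete the timers, disable keys, flush again in case driver callbacks queued more work, stop the hardware (which must also stop RX), and only then drop the driver's station and interface state; resume is simply ieee80211_reconfig(), which rebuilds that state from mac80211's own copy. A small sketch that encodes this sequence with illustrative state flags (not mac80211 API):

/* Sketch of the ordering the reworked __ieee80211_suspend() uses above.
 * The flags stand in for real subsystem state; names are illustrative. */
#include <assert.h>
#include <stdbool.h>

struct wifi_state {
    bool scanning, tx_running, quiescing, timers_armed;
    bool keys_in_driver, stas_in_driver, ifaces_in_driver;
    bool hw_running, suspended;
};

void suspend(struct wifi_state *w)
{
    w->scanning = false;         /* cancel scan: no scan state survives      */
    w->tx_running = false;       /* stop queues, let in-flight RX drain      */

    w->quiescing = true;         /* timers that fire now only record it      */
    w->timers_armed = false;     /* ... and the pending ones are deleted     */
    w->keys_in_driver = false;   /* driver forgets keys                      */

    /* nothing may still be feeding the driver when it is stopped */
    assert(!w->tx_running && !w->timers_armed);
    w->hw_running = false;       /* drv_stop(): this must also stop RX       */

    w->stas_in_driver = false;   /* driver drops per-station state           */
    w->ifaces_in_driver = false; /* per-type quiesce, then remove interfaces */

    w->suspended = true;         /* resume rebuilds via ieee80211_reconfig() */
    w->quiescing = false;
}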
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index d9233ec50610..0a11515341ba 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -80,8 +80,7 @@ use_low_rate(struct sk_buff *skb)
80 fc = le16_to_cpu(hdr->frame_control); 80 fc = le16_to_cpu(hdr->frame_control);
81 81
82 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || 82 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
83 (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 83 (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA);
84 is_multicast_ether_addr(hdr->addr1));
85} 84}
86 85
87 86
@@ -245,7 +244,10 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
245 244
246 if (!sta || !mi || use_low_rate(skb)) { 245 if (!sta || !mi || use_low_rate(skb)) {
247 ar[0].idx = rate_lowest_index(sband, sta); 246 ar[0].idx = rate_lowest_index(sband, sta);
248 ar[0].count = mp->max_retry; 247 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
248 ar[0].count = 1;
249 else
250 ar[0].count = mp->max_retry;
249 return; 251 return;
250 } 252 }
251 253
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 8bef9a1262ff..a0bef767ceb5 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -289,13 +289,15 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
289 info->control.rates[0].count = 289 info->control.rates[0].count =
290 txrc->hw->conf.short_frame_max_tx_count; 290 txrc->hw->conf.short_frame_max_tx_count;
291 291
292 /* Send management frames and broadcast/multicast data using lowest 292 /* Send management frames and NO_ACK data using lowest rate. */
293 * rate. */
294 fc = le16_to_cpu(hdr->frame_control); 293 fc = le16_to_cpu(hdr->frame_control);
295 if (!sta || !spinfo || 294 if (!sta || !spinfo ||
296 (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 295 (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
297 is_multicast_ether_addr(hdr->addr1)) { 296 info->flags & IEEE80211_TX_CTL_NO_ACK) {
298 info->control.rates[0].idx = rate_lowest_index(sband, sta); 297 info->control.rates[0].idx = rate_lowest_index(sband, sta);
298 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
299 info->control.rates[0].count = 1;
300
299 return; 301 return;
300 } 302 }
301 303
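Both rate-control changes above apply the same rule: a frame flagged IEEE80211_TX_CTL_NO_ACK is sent once, at the lowest rate, because without acknowledgements there is no feedback that would justify retries or probing higher rates. A tiny sketch of that selection rule with invented types (illustrative, not the minstrel or PID code):

/* Sketch of the "no ACK means one attempt at the lowest rate" rule. */
#include <stdbool.h>

struct rate_choice {
    int idx;        /* index into the band's bitrate table */
    int count;      /* number of transmission attempts     */
};

struct tx_info {
    bool no_ack;    /* frame will not be acknowledged           */
    bool is_data;   /* data frame (vs. management/control)      */
    bool have_sta;  /* per-station rate statistics available?   */
};

struct rate_choice pick_rate(const struct tx_info *tx,
                             int lowest_idx, int best_idx, int max_retry)
{
    struct rate_choice rc;

    if (!tx->have_sta || !tx->is_data || tx->no_ack) {
        rc.idx = lowest_idx;
        /* no ACK, no feedback: retrying or probing higher rates is futile */
        rc.count = tx->no_ack ? 1 : max_retry;
        return rc;
    }

    rc.idx = best_idx;      /* normal case: use the algorithm's choice */
    rc.count = max_retry;
    return rc;
}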
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9776f73c51ad..6a9b8e63a6bf 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -19,6 +19,7 @@
19#include <net/ieee80211_radiotap.h> 19#include <net/ieee80211_radiotap.h>
20 20
21#include "ieee80211_i.h" 21#include "ieee80211_i.h"
22#include "driver-ops.h"
22#include "led.h" 23#include "led.h"
23#include "mesh.h" 24#include "mesh.h"
24#include "wep.h" 25#include "wep.h"
@@ -629,15 +630,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
629 * possible. 630 * possible.
630 */ 631 */
631 632
632 if (!ieee80211_has_protected(hdr->frame_control)) {
633 if (!ieee80211_is_mgmt(hdr->frame_control) ||
634 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
635 return RX_CONTINUE;
636 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
637 if (mmie_keyidx < 0)
638 return RX_CONTINUE;
639 }
640
641 /* 633 /*
642 * No point in finding a key and decrypting if the frame is neither 634 * No point in finding a key and decrypting if the frame is neither
643 * addressed to us nor a multicast frame. 635 * addressed to us nor a multicast frame.
@@ -648,8 +640,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
648 if (rx->sta) 640 if (rx->sta)
649 stakey = rcu_dereference(rx->sta->key); 641 stakey = rcu_dereference(rx->sta->key);
650 642
643 if (!ieee80211_has_protected(hdr->frame_control))
644 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
645
651 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 646 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
652 rx->key = stakey; 647 rx->key = stakey;
648 /* Skip decryption if the frame is not protected. */
649 if (!ieee80211_has_protected(hdr->frame_control))
650 return RX_CONTINUE;
653 } else if (mmie_keyidx >= 0) { 651 } else if (mmie_keyidx >= 0) {
654 /* Broadcast/multicast robust management frame / BIP */ 652 /* Broadcast/multicast robust management frame / BIP */
655 if ((rx->status->flag & RX_FLAG_DECRYPTED) && 653 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
@@ -660,6 +658,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
660 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 658 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
661 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 659 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
662 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 660 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
661 } else if (!ieee80211_has_protected(hdr->frame_control)) {
662 /*
663 * The frame was not protected, so skip decryption. However, we
664 * need to set rx->key if there is a key that could have been
665 * used so that the frame may be dropped if encryption would
666 * have been expected.
667 */
668 struct ieee80211_key *key = NULL;
669 if (ieee80211_is_mgmt(hdr->frame_control) &&
670 is_multicast_ether_addr(hdr->addr1) &&
671 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
672 rx->key = key;
673 else if ((key = rcu_dereference(rx->sdata->default_key)))
674 rx->key = key;
675 return RX_CONTINUE;
663 } else { 676 } else {
664 /* 677 /*
665 * The device doesn't give us the IV so we won't be 678 * The device doesn't give us the IV so we won't be
@@ -773,9 +786,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
773 786
774 atomic_inc(&sdata->bss->num_sta_ps); 787 atomic_inc(&sdata->bss->num_sta_ps);
775 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); 788 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
776 if (local->ops->sta_notify) 789 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
777 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
778 STA_NOTIFY_SLEEP, &sta->sta);
779#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 790#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
780 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 791 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
781 sdata->dev->name, sta->sta.addr, sta->sta.aid); 792 sdata->dev->name, sta->sta.addr, sta->sta.aid);
@@ -792,9 +803,7 @@ static int ap_sta_ps_end(struct sta_info *sta)
792 atomic_dec(&sdata->bss->num_sta_ps); 803 atomic_dec(&sdata->bss->num_sta_ps);
793 804
794 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); 805 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
795 if (local->ops->sta_notify) 806 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
796 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
797 STA_NOTIFY_AWAKE, &sta->sta);
798 807
799 if (!skb_queue_empty(&sta->ps_tx_buf)) 808 if (!skb_queue_empty(&sta->ps_tx_buf))
800 sta_info_clear_tim_bit(sta); 809 sta_info_clear_tim_bit(sta);
@@ -1212,109 +1221,38 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1212 /* Drop unencrypted frames if key is set. */ 1221 /* Drop unencrypted frames if key is set. */
1213 if (unlikely(!ieee80211_has_protected(fc) && 1222 if (unlikely(!ieee80211_has_protected(fc) &&
1214 !ieee80211_is_nullfunc(fc) && 1223 !ieee80211_is_nullfunc(fc) &&
1215 (!ieee80211_is_mgmt(fc) || 1224 ieee80211_is_data(fc) &&
1216 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1217 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1218 (rx->key || rx->sdata->drop_unencrypted)))
1219 return -EACCES;
1220 /* BIP does not use Protected field, so need to check MMIE */
1221 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1222 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1223 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1224 (rx->key || rx->sdata->drop_unencrypted))) 1225 (rx->key || rx->sdata->drop_unencrypted)))
1225 return -EACCES; 1226 return -EACCES;
1227 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1228 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1229 rx->key))
1230 return -EACCES;
1231 /* BIP does not use Protected field, so need to check MMIE */
1232 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1233 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1234 rx->key))
1235 return -EACCES;
1236 /*
1237 * When using MFP, Action frames are not allowed prior to
1238 * having configured keys.
1239 */
1240 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1241 ieee80211_is_robust_mgmt_frame(
1242 (struct ieee80211_hdr *) rx->skb->data)))
1243 return -EACCES;
1244 }
1226 1245
1227 return 0; 1246 return 0;
1228} 1247}
1229 1248
1230static int 1249static int
1231ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1250__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1232{ 1251{
1233 struct net_device *dev = rx->dev; 1252 struct net_device *dev = rx->dev;
1234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1235 u16 hdrlen, ethertype;
1236 u8 *payload;
1237 u8 dst[ETH_ALEN];
1238 u8 src[ETH_ALEN] __aligned(2);
1239 struct sk_buff *skb = rx->skb;
1240 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1253 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1241 1254
1242 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1255 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1243 return -1;
1244
1245 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1246
1247 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1248 * header
1249 * IEEE 802.11 address fields:
1250 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1251 * 0 0 DA SA BSSID n/a
1252 * 0 1 DA BSSID SA n/a
1253 * 1 0 BSSID SA DA n/a
1254 * 1 1 RA TA DA SA
1255 */
1256 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1257 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1258
1259 switch (hdr->frame_control &
1260 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1261 case cpu_to_le16(IEEE80211_FCTL_TODS):
1262 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1263 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1264 return -1;
1265 break;
1266 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1267 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1268 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1269 return -1;
1270 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1271 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1272 (skb->data + hdrlen);
1273 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1274 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1275 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1276 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1277 }
1278 }
1279 break;
1280 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1281 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1282 (is_multicast_ether_addr(dst) &&
1283 !compare_ether_addr(src, dev->dev_addr)))
1284 return -1;
1285 break;
1286 case cpu_to_le16(0):
1287 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1288 return -1;
1289 break;
1290 }
1291
1292 if (unlikely(skb->len - hdrlen < 8))
1293 return -1;
1294
1295 payload = skb->data + hdrlen;
1296 ethertype = (payload[6] << 8) | payload[7];
1297
1298 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1299 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1300 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1301 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1302 * replace EtherType */
1303 skb_pull(skb, hdrlen + 6);
1304 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1305 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1306 } else {
1307 struct ethhdr *ehdr;
1308 __be16 len;
1309
1310 skb_pull(skb, hdrlen);
1311 len = htons(skb->len);
1312 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1313 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1314 memcpy(ehdr->h_source, src, ETH_ALEN);
1315 ehdr->h_proto = len;
1316 }
1317 return 0;
1318} 1256}
1319 1257
1320/* 1258/*
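The two RX changes above work as a pair: the decrypt handler now records which key would have applied even when a frame arrives unprotected (the pairwise key, the default management key for multicast management frames, or the default key), and the simplified drop-unencrypted check can then refuse cleartext data whenever a key is configured or drop_unencrypted is set, with the extra MFP rules handled separately for robust management frames. A sketch of just the basic data-frame rule, with invented names rather than the mac80211 structures:

/* Sketch of the basic "drop unprotected data when a key is configured" rule. */
#include <stdbool.h>

struct rx_frame {
    bool is_data;
    bool is_nullfunc;
    bool protected_bit;   /* Protected bit set in the frame control field */
};

/*
 * have_key: a pairwise or default key applies to this frame (the decrypt
 * step above records one even for unprotected frames for exactly this test).
 * drop_unencrypted_cfg: the interface is configured to refuse cleartext data.
 */
bool should_drop_unencrypted(const struct rx_frame *f, bool have_key,
                             bool drop_unencrypted_cfg)
{
    if (f->protected_bit || f->is_nullfunc || !f->is_data)
        return false;
    return have_key || drop_unencrypted_cfg;
}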
@@ -1453,7 +1391,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1453 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1391 if (!(rx->flags & IEEE80211_RX_AMSDU))
1454 return RX_CONTINUE; 1392 return RX_CONTINUE;
1455 1393
1456 err = ieee80211_data_to_8023(rx); 1394 err = __ieee80211_data_to_8023(rx);
1457 if (unlikely(err)) 1395 if (unlikely(err))
1458 return RX_DROP_UNUSABLE; 1396 return RX_DROP_UNUSABLE;
1459 1397
@@ -1639,7 +1577,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1639 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1577 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1640 return RX_DROP_MONITOR; 1578 return RX_DROP_MONITOR;
1641 1579
1642 err = ieee80211_data_to_8023(rx); 1580 err = __ieee80211_data_to_8023(rx);
1643 if (unlikely(err)) 1581 if (unlikely(err))
1644 return RX_DROP_UNUSABLE; 1582 return RX_DROP_UNUSABLE;
1645 1583
@@ -1827,6 +1765,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1827 sizeof(mgmt->u.action.u.chan_switch))) 1765 sizeof(mgmt->u.action.u.chan_switch)))
1828 return RX_DROP_MONITOR; 1766 return RX_DROP_MONITOR;
1829 1767
1768 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1769 return RX_DROP_MONITOR;
1770
1830 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 1771 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1831 return RX_DROP_MONITOR; 1772 return RX_DROP_MONITOR;
1832 1773
@@ -1837,7 +1778,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1837 if (!bss) 1778 if (!bss)
1838 return RX_DROP_MONITOR; 1779 return RX_DROP_MONITOR;
1839 1780
1840 ieee80211_process_chanswitch(sdata, 1781 ieee80211_sta_process_chanswitch(sdata,
1841 &mgmt->u.action.u.chan_switch.sw_elem, bss); 1782 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1842 ieee80211_rx_bss_put(local, bss); 1783 ieee80211_rx_bss_put(local, bss);
1843 break; 1784 break;
@@ -1932,7 +1873,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1932 !ieee80211_is_auth(hdr->frame_control)) 1873 !ieee80211_is_auth(hdr->frame_control))
1933 goto ignore; 1874 goto ignore;
1934 1875
1935 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr); 1876 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL);
1936 ignore: 1877 ignore:
1937 dev_kfree_skb(rx->skb); 1878 dev_kfree_skb(rx->skb);
1938 rx->skb = NULL; 1879 rx->skb = NULL;
@@ -2287,6 +2228,43 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
2287} 2228}
2288 2229
2289 2230
2231static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2232 struct tid_ampdu_rx *tid_agg_rx,
2233 int index)
2234{
2235 struct ieee80211_supported_band *sband;
2236 struct ieee80211_rate *rate;
2237 struct ieee80211_rx_status status;
2238
2239 if (!tid_agg_rx->reorder_buf[index])
2240 goto no_frame;
2241
2242 /* release the reordered frames to stack */
2243 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status));
2244 sband = hw->wiphy->bands[status.band];
2245 if (status.flag & RX_FLAG_HT)
2246 rate = sband->bitrates; /* TODO: HT rates */
2247 else
2248 rate = &sband->bitrates[status.rate_idx];
2249 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2250 &status, rate);
2251 tid_agg_rx->stored_mpdu_num--;
2252 tid_agg_rx->reorder_buf[index] = NULL;
2253
2254no_frame:
2255 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2256}
2257
2258
2259/*
2260 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
2261 * the skb was added to the buffer longer than this time ago, the earlier
2262 * frames that have not yet been received are assumed to be lost and the skb
2263 * can be released for processing. This may also release other skb's from the
2264 * reorder buffer if there are no additional gaps between the frames.
2265 */
2266#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2267
2290/* 2268/*
2291 * As this function belongs to the Rx path it must be called with 2269
2292 * the proper rcu_read_lock protection for its flow. 2270 * the proper rcu_read_lock protection for its flow.
@@ -2298,12 +2276,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2298 u16 mpdu_seq_num, 2276 u16 mpdu_seq_num,
2299 int bar_req) 2277 int bar_req)
2300{ 2278{
2301 struct ieee80211_local *local = hw_to_local(hw);
2302 struct ieee80211_rx_status status;
2303 u16 head_seq_num, buf_size; 2279 u16 head_seq_num, buf_size;
2304 int index; 2280 int index;
2305 struct ieee80211_supported_band *sband;
2306 struct ieee80211_rate *rate;
2307 2281
2308 buf_size = tid_agg_rx->buf_size; 2282 buf_size = tid_agg_rx->buf_size;
2309 head_seq_num = tid_agg_rx->head_seq_num; 2283 head_seq_num = tid_agg_rx->head_seq_num;
@@ -2328,28 +2302,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2328 index = seq_sub(tid_agg_rx->head_seq_num, 2302 index = seq_sub(tid_agg_rx->head_seq_num,
2329 tid_agg_rx->ssn) 2303 tid_agg_rx->ssn)
2330 % tid_agg_rx->buf_size; 2304 % tid_agg_rx->buf_size;
2331 2305 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2332 if (tid_agg_rx->reorder_buf[index]) { 2306 index);
2333 /* release the reordered frames to stack */
2334 memcpy(&status,
2335 tid_agg_rx->reorder_buf[index]->cb,
2336 sizeof(status));
2337 sband = local->hw.wiphy->bands[status.band];
2338 if (status.flag & RX_FLAG_HT) {
2339 /* TODO: HT rates */
2340 rate = sband->bitrates;
2341 } else {
2342 rate = &sband->bitrates
2343 [status.rate_idx];
2344 }
2345 __ieee80211_rx_handle_packet(hw,
2346 tid_agg_rx->reorder_buf[index],
2347 &status, rate);
2348 tid_agg_rx->stored_mpdu_num--;
2349 tid_agg_rx->reorder_buf[index] = NULL;
2350 }
2351 tid_agg_rx->head_seq_num =
2352 seq_inc(tid_agg_rx->head_seq_num);
2353 } 2307 }
2354 if (bar_req) 2308 if (bar_req)
2355 return 1; 2309 return 1;
@@ -2376,26 +2330,50 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2376 2330
2377 /* put the frame in the reordering buffer */ 2331 /* put the frame in the reordering buffer */
2378 tid_agg_rx->reorder_buf[index] = skb; 2332 tid_agg_rx->reorder_buf[index] = skb;
2333 tid_agg_rx->reorder_time[index] = jiffies;
2379 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus, 2334 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2380 sizeof(*rxstatus)); 2335 sizeof(*rxstatus));
2381 tid_agg_rx->stored_mpdu_num++; 2336 tid_agg_rx->stored_mpdu_num++;
2382 /* release the buffer until next missing frame */ 2337 /* release the buffer until next missing frame */
2383 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) 2338 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2384 % tid_agg_rx->buf_size; 2339 % tid_agg_rx->buf_size;
2385 while (tid_agg_rx->reorder_buf[index]) { 2340 if (!tid_agg_rx->reorder_buf[index] &&
2386 /* release the reordered frame back to stack */ 2341 tid_agg_rx->stored_mpdu_num > 1) {
2387 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, 2342 /*
2388 sizeof(status)); 2343 * No buffers ready to be released, but check whether any
2389 sband = local->hw.wiphy->bands[status.band]; 2344 * frames in the reorder buffer have timed out.
2390 if (status.flag & RX_FLAG_HT) 2345 */
2391 rate = sband->bitrates; /* TODO: HT rates */ 2346 int j;
2392 else 2347 int skipped = 1;
2393 rate = &sband->bitrates[status.rate_idx]; 2348 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2394 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2349 j = (j + 1) % tid_agg_rx->buf_size) {
2395 &status, rate); 2350 if (tid_agg_rx->reorder_buf[j] == NULL) {
2396 tid_agg_rx->stored_mpdu_num--; 2351 skipped++;
2397 tid_agg_rx->reorder_buf[index] = NULL; 2352 continue;
2398 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2353 }
2354 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2355 HZ / 10))
2356 break;
2357
2358#ifdef CONFIG_MAC80211_HT_DEBUG
2359 if (net_ratelimit())
2360 printk(KERN_DEBUG "%s: release an RX reorder "
2361 "frame due to timeout on earlier "
2362 "frames\n",
2363 wiphy_name(hw->wiphy));
2364#endif
2365 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2366
2367 /*
2368 * Increment the head seq# also for the skipped slots.
2369 */
2370 tid_agg_rx->head_seq_num =
2371 (tid_agg_rx->head_seq_num + skipped) &
2372 SEQ_MASK;
2373 skipped = 0;
2374 }
2375 } else while (tid_agg_rx->reorder_buf[index]) {
2376 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2399 index = seq_sub(tid_agg_rx->head_seq_num, 2377 index = seq_sub(tid_agg_rx->head_seq_num,
2400 tid_agg_rx->ssn) % tid_agg_rx->buf_size; 2378 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2401 } 2379 }
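The reorder-buffer change above stamps each buffered frame (reorder_time[index]) and, when the slot at the head of the ring stays empty for HT_RX_REORDER_BUF_TIMEOUT while later slots hold frames, releases those frames anyway and advances the head past the gap, so a single lost MPDU can no longer stall the whole TID. A simplified ring-buffer sketch of that release logic; the types, the buffer size and the one-second timeout are invented for the example, and there is no locking:

/* Sketch of releasing reordered frames, including the timeout path. */
#include <stdbool.h>
#include <time.h>

#define BUF_SIZE        8
#define REORDER_TIMEOUT 1        /* seconds; stands in for HZ/10 jiffies */

struct reorder_buf {
    bool   present[BUF_SIZE];    /* slot holds a buffered frame          */
    time_t stamp[BUF_SIZE];      /* when that frame was buffered         */
    unsigned int head;           /* index of the next in-order slot      */
};

static void deliver(unsigned int slot) { (void)slot; /* hand frame to stack */ }

/* release everything that is in order, or that has waited past the timeout */
void reorder_release(struct reorder_buf *b, time_t now)
{
    for (;;) {
        /* in-order frames at the head are released immediately */
        while (b->present[b->head]) {
            deliver(b->head);
            b->present[b->head] = false;
            b->head = (b->head + 1) % BUF_SIZE;
        }

        /* find the first buffered frame beyond the gap at the head */
        unsigned int i;
        for (i = 1; i < BUF_SIZE; i++)
            if (b->present[(b->head + i) % BUF_SIZE])
                break;
        if (i == BUF_SIZE)
            return;                         /* buffer is empty */

        unsigned int slot = (b->head + i) % BUF_SIZE;
        if (now - b->stamp[slot] < REORDER_TIMEOUT)
            return;                         /* keep waiting for the gap */

        /* waited too long: declare the missing frame(s) lost */
        deliver(slot);
        b->present[slot] = false;
        b->head = (slot + 1) % BUF_SIZE;    /* skip over the hole */
    }
}

The kernel version does the equivalent scan keyed on jiffies and also bumps head_seq_num for every skipped slot so the sequence-number accounting stays consistent.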
@@ -2517,6 +2495,18 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2517 return; 2495 return;
2518 } 2496 }
2519 2497
2498 /*
2499 * In theory, the block ack reordering should happen after duplicate
2500 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2501 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2502 * happen as a new RX handler between ieee80211_rx_h_check and
2503 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2504 * the time being, the call can be here since RX reorder buf processing
2505 * will implicitly skip duplicates. We could, in theory at least,
2506 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2507 * frames from other than operational channel), but that should not
2508 * happen in normal networks.
2509 */
2520 if (!ieee80211_rx_reorder_ampdu(local, skb, status)) 2510 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2521 __ieee80211_rx_handle_packet(hw, skb, status, rate); 2511 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2522 2512
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3bf9839f5916..2a8d09ad17ff 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -21,6 +21,7 @@
21#include <net/iw_handler.h> 21#include <net/iw_handler.h>
22 22
23#include "ieee80211_i.h" 23#include "ieee80211_i.h"
24#include "driver-ops.h"
24#include "mesh.h" 25#include "mesh.h"
25 26
26#define IEEE80211_PROBE_DELAY (HZ / 33) 27#define IEEE80211_PROBE_DELAY (HZ / 33)
@@ -202,18 +203,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
202 return RX_QUEUED; 203 return RX_QUEUED;
203} 204}
204 205
205void ieee80211_scan_failed(struct ieee80211_local *local)
206{
207 if (WARN_ON(!local->scan_req))
208 return;
209
210 /* notify cfg80211 about the failed scan */
211 if (local->scan_req != &local->int_scan_req)
212 cfg80211_scan_done(local->scan_req, true);
213
214 local->scan_req = NULL;
215}
216
217/* 206/*
218 * inform AP that we will go to sleep so that it will buffer the frames 207 * inform AP that we will go to sleep so that it will buffer the frames
219 * while we scan 208 * while we scan
@@ -253,7 +242,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
253{ 242{
254 struct ieee80211_local *local = sdata->local; 243 struct ieee80211_local *local = sdata->local;
255 244
256 if (!local->powersave) 245 if (!local->ps_sdata)
257 ieee80211_send_nullfunc(local, sdata, 0); 246 ieee80211_send_nullfunc(local, sdata, 0);
258 else { 247 else {
259 /* 248 /*
@@ -274,51 +263,62 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
274 } 263 }
275} 264}
276 265
266static void ieee80211_restore_scan_ies(struct ieee80211_local *local)
267{
268 kfree(local->scan_req->ie);
269 local->scan_req->ie = local->orig_ies;
270 local->scan_req->ie_len = local->orig_ies_len;
271}
272
277void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 273void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
278{ 274{
279 struct ieee80211_local *local = hw_to_local(hw); 275 struct ieee80211_local *local = hw_to_local(hw);
280 struct ieee80211_sub_if_data *sdata; 276 struct ieee80211_sub_if_data *sdata;
277 bool was_hw_scan;
281 278
282 if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) 279 mutex_lock(&local->scan_mtx);
280
281 if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) {
282 mutex_unlock(&local->scan_mtx);
283 return; 283 return;
284 }
284 285
285 if (WARN_ON(!local->scan_req)) 286 if (WARN_ON(!local->scan_req)) {
287 mutex_unlock(&local->scan_mtx);
286 return; 288 return;
289 }
290
291 if (local->hw_scanning)
292 ieee80211_restore_scan_ies(local);
287 293
288 if (local->scan_req != &local->int_scan_req) 294 if (local->scan_req != &local->int_scan_req)
289 cfg80211_scan_done(local->scan_req, aborted); 295 cfg80211_scan_done(local->scan_req, aborted);
290 local->scan_req = NULL; 296 local->scan_req = NULL;
291 297
292 local->last_scan_completed = jiffies; 298 was_hw_scan = local->hw_scanning;
299 local->hw_scanning = false;
300 local->sw_scanning = false;
301 local->scan_channel = NULL;
293 302
294 if (local->hw_scanning) { 303 /* we only have to protect scan_req and hw/sw scan */
295 local->hw_scanning = false; 304 mutex_unlock(&local->scan_mtx);
296 /*
297 * Somebody might have requested channel change during scan
298 * that we won't have acted upon, try now. ieee80211_hw_config
299 * will set the flag based on actual changes.
300 */
301 ieee80211_hw_config(local, 0);
302 goto done;
303 }
304 305
305 local->sw_scanning = false;
306 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 306 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
307 if (was_hw_scan)
308 goto done;
307 309
308 netif_tx_lock_bh(local->mdev); 310 netif_tx_lock_bh(local->mdev);
309 netif_addr_lock(local->mdev); 311 netif_addr_lock(local->mdev);
310 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; 312 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
311 local->ops->configure_filter(local_to_hw(local), 313 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
312 FIF_BCN_PRBRESP_PROMISC, 314 &local->filter_flags,
313 &local->filter_flags, 315 local->mdev->mc_count,
314 local->mdev->mc_count, 316 local->mdev->mc_list);
315 local->mdev->mc_list);
316 317
317 netif_addr_unlock(local->mdev); 318 netif_addr_unlock(local->mdev);
318 netif_tx_unlock_bh(local->mdev); 319 netif_tx_unlock_bh(local->mdev);
319 320
320 if (local->ops->sw_scan_complete) 321 drv_sw_scan_complete(local);
321 local->ops->sw_scan_complete(local_to_hw(local));
322 322
323 mutex_lock(&local->iflist_mtx); 323 mutex_lock(&local->iflist_mtx);
324 list_for_each_entry(sdata, &local->interfaces, list) { 324 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -338,18 +338,160 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
338 if (sdata->vif.type == NL80211_IFTYPE_AP || 338 if (sdata->vif.type == NL80211_IFTYPE_AP ||
339 sdata->vif.type == NL80211_IFTYPE_ADHOC || 339 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
340 sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 340 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
341 ieee80211_if_config(sdata, 341 ieee80211_bss_info_change_notify(
342 IEEE80211_IFCC_BEACON_ENABLED); 342 sdata, BSS_CHANGED_BEACON_ENABLED);
343 } 343 }
344 mutex_unlock(&local->iflist_mtx); 344 mutex_unlock(&local->iflist_mtx);
345 345
346 done: 346 done:
347 ieee80211_recalc_idle(local);
347 ieee80211_mlme_notify_scan_completed(local); 348 ieee80211_mlme_notify_scan_completed(local);
348 ieee80211_ibss_notify_scan_completed(local); 349 ieee80211_ibss_notify_scan_completed(local);
349 ieee80211_mesh_notify_scan_completed(local); 350 ieee80211_mesh_notify_scan_completed(local);
350} 351}
351EXPORT_SYMBOL(ieee80211_scan_completed); 352EXPORT_SYMBOL(ieee80211_scan_completed);
352 353
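The locking shape of the rewritten ieee80211_scan_completed() is worth seeing on its own: take scan_mtx only long enough to validate and snapshot scan_req and the hw/sw flags, then do the heavy reconfiguration unlocked, so completion never has to nest scan_mtx inside iflist_mtx or the driver callbacks. The pthread sketch below is illustrative only; the field and function names are stand-ins.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct scan_state {
        pthread_mutex_t mtx;
        bool hw_scanning, sw_scanning;
        void *scan_req;                   /* stands in for local->scan_req */
    };

    /* Validate and snapshot under the lock, reconfigure outside it. */
    static void scan_completed(struct scan_state *s, bool aborted)
    {
        bool was_hw_scan;

        pthread_mutex_lock(&s->mtx);
        if ((!s->hw_scanning && !s->sw_scanning) || !s->scan_req) {
            pthread_mutex_unlock(&s->mtx);
            return;                       /* spurious completion */
        }
        was_hw_scan = s->hw_scanning;
        s->hw_scanning = s->sw_scanning = false;
        s->scan_req = NULL;
        pthread_mutex_unlock(&s->mtx);

        /* ...channel/filter/beacon reconfiguration would happen here, unlocked... */
        printf("scan done (%s, aborted=%d)\n", was_hw_scan ? "hw" : "sw", aborted);
    }

    int main(void)
    {
        struct scan_state s = { .mtx = PTHREAD_MUTEX_INITIALIZER,
                                .sw_scanning = true, .scan_req = &s };
        scan_completed(&s, false);
        return 0;
    }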
354static int ieee80211_start_sw_scan(struct ieee80211_local *local)
355{
356 struct ieee80211_sub_if_data *sdata;
357
358 /*
359 * Hardware/driver doesn't support hw_scan, so use software
360 * scanning instead. First send a nullfunc frame with power save
 361 * bit on so that the AP will buffer the frames for us while we are not
362 * listening, then send probe requests to each channel and wait for
363 * the responses. After all channels are scanned, tune back to the
364 * original channel and send a nullfunc frame with power save bit
365 * off to trigger the AP to send us all the buffered frames.
366 *
367 * Note that while local->sw_scanning is true everything else but
368 * nullfunc frames and probe requests will be dropped in
369 * ieee80211_tx_h_check_assoc().
370 */
371 drv_sw_scan_start(local);
372
373 mutex_lock(&local->iflist_mtx);
374 list_for_each_entry(sdata, &local->interfaces, list) {
375 if (!netif_running(sdata->dev))
376 continue;
377
378 /* disable beaconing */
379 if (sdata->vif.type == NL80211_IFTYPE_AP ||
380 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
381 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
382 ieee80211_bss_info_change_notify(
383 sdata, BSS_CHANGED_BEACON_ENABLED);
384
385 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
386 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
387 netif_tx_stop_all_queues(sdata->dev);
388 ieee80211_scan_ps_enable(sdata);
389 }
390 } else
391 netif_tx_stop_all_queues(sdata->dev);
392 }
393 mutex_unlock(&local->iflist_mtx);
394
395 local->scan_state = SCAN_SET_CHANNEL;
396 local->scan_channel_idx = 0;
397
398 netif_addr_lock_bh(local->mdev);
399 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
400 drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
401 &local->filter_flags,
402 local->mdev->mc_count,
403 local->mdev->mc_list);
404 netif_addr_unlock_bh(local->mdev);
405
406 /* TODO: start scan as soon as all nullfunc frames are ACKed */
407 queue_delayed_work(local->hw.workqueue, &local->scan_work,
408 IEEE80211_CHANNEL_TIME);
409
410 return 0;
411}
412
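As a concrete illustration of the nullfunc trick described in the comment above, the sketch below only assembles the 802.11 frame-control word for a nullfunc data frame to the AP, with the power-management bit set before hopping off-channel and cleared after returning. It is a toy, not the kernel's frame construction path.

    #include <stdint.h>
    #include <stdio.h>

    /* 802.11 frame-control bits used by the scan powersave trick. */
    #define FCTL_FTYPE_DATA      0x0008
    #define FCTL_STYPE_NULLFUNC  0x0040
    #define FCTL_TODS            0x0100
    #define FCTL_PM              0x1000   /* "I am going to sleep, buffer for me" */

    static uint16_t nullfunc_fc(int powersave)
    {
        uint16_t fc = FCTL_FTYPE_DATA | FCTL_STYPE_NULLFUNC | FCTL_TODS;
        if (powersave)
            fc |= FCTL_PM;
        return fc;
    }

    int main(void)
    {
        /* before leaving the operating channel: tell the AP to buffer */
        printf("scan start : fc=0x%04x\n", nullfunc_fc(1));
        /* ...hop channels, send probe requests, collect responses... */
        /* back on the operating channel: ask the AP to flush its buffer */
        printf("scan done  : fc=0x%04x\n", nullfunc_fc(0));
        return 0;
    }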
413
414static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
415 struct cfg80211_scan_request *req)
416{
417 struct ieee80211_local *local = sdata->local;
418 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
419 int rc;
420
421 if (local->scan_req)
422 return -EBUSY;
423
424 if (local->ops->hw_scan) {
425 u8 *ies;
426 int ielen;
427
428 ies = kmalloc(2 + IEEE80211_MAX_SSID_LEN +
429 local->scan_ies_len + req->ie_len, GFP_KERNEL);
430 if (!ies)
431 return -ENOMEM;
432
433 ielen = ieee80211_build_preq_ies(local, ies,
434 req->ie, req->ie_len);
435 local->orig_ies = req->ie;
436 local->orig_ies_len = req->ie_len;
437 req->ie = ies;
438 req->ie_len = ielen;
439 }
440
441 local->scan_req = req;
442 local->scan_sdata = sdata;
443
444 if (req != &local->int_scan_req &&
445 sdata->vif.type == NL80211_IFTYPE_STATION &&
446 (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE ||
447 ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
448 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE)) {
449 /* actually wait for the assoc to finish/time out */
450 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
451 return 0;
452 }
453
454 if (local->ops->hw_scan)
455 local->hw_scanning = true;
456 else
457 local->sw_scanning = true;
458 /*
459 * Kicking off the scan need not be protected,
460 * only the scan variable stuff, since now
461 * local->scan_req is assigned and other callers
462 * will abort their scan attempts.
463 *
464 * This avoids getting a scan_mtx -> iflist_mtx
465 * dependency, so that the scan completed calls
466 * have more locking freedom.
467 */
468
469 ieee80211_recalc_idle(local);
470 mutex_unlock(&local->scan_mtx);
471
472 if (local->ops->hw_scan)
473 rc = drv_hw_scan(local, local->scan_req);
474 else
475 rc = ieee80211_start_sw_scan(local);
476
477 mutex_lock(&local->scan_mtx);
478
479 if (rc) {
480 if (local->ops->hw_scan) {
481 local->hw_scanning = false;
482 ieee80211_restore_scan_ies(local);
483 } else
484 local->sw_scanning = false;
485
486 ieee80211_recalc_idle(local);
487
488 local->scan_req = NULL;
489 local->scan_sdata = NULL;
490 }
491
492 return rc;
493}
494
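The ie/orig_ies bookkeeping above, together with ieee80211_restore_scan_ies() earlier in this file, amounts to: build an enlarged, driver-ready IE buffer for a hardware scan, remember the caller's original pointer and length, then swap them back and free the temporary buffer when the scan finishes or fails to start. A hedged userspace sketch of that pattern, with made-up names:

    #include <stdlib.h>
    #include <string.h>

    struct scan_request {
        const unsigned char *ie;
        size_t ie_len;
    };

    struct scan_ctx {
        const unsigned char *orig_ie;
        size_t orig_ie_len;
    };

    static int stash_and_build_ies(struct scan_ctx *ctx, struct scan_request *req,
                                   size_t extra_room)
    {
        unsigned char *buf = malloc(extra_room + req->ie_len);
        if (!buf)
            return -1;

        /* ...supported rates, HT caps, etc. would be written here... */
        memcpy(buf + extra_room, req->ie, req->ie_len);

        ctx->orig_ie = req->ie;            /* remember what the caller gave us */
        ctx->orig_ie_len = req->ie_len;
        req->ie = buf;                     /* hand the enlarged buffer to the driver */
        req->ie_len = extra_room + ctx->orig_ie_len;
        return 0;
    }

    static void restore_ies(struct scan_ctx *ctx, struct scan_request *req)
    {
        free((void *)req->ie);             /* the temporary buffer */
        req->ie = ctx->orig_ie;
        req->ie_len = ctx->orig_ie_len;
    }

    int main(void)
    {
        static const unsigned char extra[] = { 0xdd, 0x04, 0x00, 0x50, 0xf2, 0x04 };
        struct scan_request req = { extra, sizeof(extra) };
        struct scan_ctx ctx;

        if (!stash_and_build_ies(&ctx, &req, 32))
            restore_ies(&ctx, &req);       /* as the completion/error paths do */
        return 0;
    }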
353void ieee80211_scan_work(struct work_struct *work) 495void ieee80211_scan_work(struct work_struct *work)
354{ 496{
355 struct ieee80211_local *local = 497 struct ieee80211_local *local =
@@ -359,17 +501,41 @@ void ieee80211_scan_work(struct work_struct *work)
359 int skip, i; 501 int skip, i;
360 unsigned long next_delay = 0; 502 unsigned long next_delay = 0;
361 503
504 mutex_lock(&local->scan_mtx);
505 if (!sdata || !local->scan_req) {
506 mutex_unlock(&local->scan_mtx);
507 return;
508 }
509
510 if (local->scan_req && !(local->sw_scanning || local->hw_scanning)) {
511 struct cfg80211_scan_request *req = local->scan_req;
512 int rc;
513
514 local->scan_req = NULL;
515
516 rc = __ieee80211_start_scan(sdata, req);
517 mutex_unlock(&local->scan_mtx);
518
519 if (rc)
520 ieee80211_scan_completed(&local->hw, true);
521 return;
522 }
523
524 mutex_unlock(&local->scan_mtx);
525
362 /* 526 /*
363 * Avoid re-scheduling when the sdata is going away. 527 * Avoid re-scheduling when the sdata is going away.
364 */ 528 */
365 if (!netif_running(sdata->dev)) 529 if (!netif_running(sdata->dev)) {
530 ieee80211_scan_completed(&local->hw, true);
366 return; 531 return;
532 }
367 533
368 switch (local->scan_state) { 534 switch (local->scan_state) {
369 case SCAN_SET_CHANNEL: 535 case SCAN_SET_CHANNEL:
370 /* if no more bands/channels left, complete scan */ 536 /* if no more bands/channels left, complete scan */
371 if (local->scan_channel_idx >= local->scan_req->n_channels) { 537 if (local->scan_channel_idx >= local->scan_req->n_channels) {
372 ieee80211_scan_completed(local_to_hw(local), false); 538 ieee80211_scan_completed(&local->hw, false);
373 return; 539 return;
374 } 540 }
375 skip = 0; 541 skip = 0;
@@ -393,24 +559,39 @@ void ieee80211_scan_work(struct work_struct *work)
393 if (skip) 559 if (skip)
394 break; 560 break;
395 561
396 next_delay = IEEE80211_PROBE_DELAY + 562 /*
397 usecs_to_jiffies(local->hw.channel_change_time); 563 * Probe delay is used to update the NAV, cf. 11.1.3.2.2
564 * (which unfortunately doesn't say _why_ step a) is done,
565 * but it waits for the probe delay or until a frame is
566 * received - and the received frame would update the NAV).
567 * For now, we do not support waiting until a frame is
568 * received.
569 *
570 * In any case, it is not necessary for a passive scan.
571 */
572 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
573 !local->scan_req->n_ssids) {
574 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
575 break;
576 }
577
578 next_delay = IEEE80211_PROBE_DELAY;
398 local->scan_state = SCAN_SEND_PROBE; 579 local->scan_state = SCAN_SEND_PROBE;
399 break; 580 break;
400 case SCAN_SEND_PROBE: 581 case SCAN_SEND_PROBE:
401 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
402 local->scan_state = SCAN_SET_CHANNEL;
403
404 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
405 !local->scan_req->n_ssids)
406 break;
407 for (i = 0; i < local->scan_req->n_ssids; i++) 582 for (i = 0; i < local->scan_req->n_ssids; i++)
408 ieee80211_send_probe_req( 583 ieee80211_send_probe_req(
409 sdata, NULL, 584 sdata, NULL,
410 local->scan_req->ssids[i].ssid, 585 local->scan_req->ssids[i].ssid,
411 local->scan_req->ssids[i].ssid_len, 586 local->scan_req->ssids[i].ssid_len,
412 local->scan_req->ie, local->scan_req->ie_len); 587 local->scan_req->ie, local->scan_req->ie_len);
588
589 /*
590 * After sending probe requests, wait for probe responses
591 * on the channel.
592 */
413 next_delay = IEEE80211_CHANNEL_TIME; 593 next_delay = IEEE80211_CHANNEL_TIME;
594 local->scan_state = SCAN_SET_CHANNEL;
414 break; 595 break;
415 } 596 }
416 597
@@ -418,150 +599,53 @@ void ieee80211_scan_work(struct work_struct *work)
418 next_delay); 599 next_delay);
419} 600}
420 601
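Sketched as a standalone state machine, the scan loop above alternates between two states with three different dwell times. The tick values below are placeholders (the kernel derives its delays from HZ); only the control flow is meant to match: passive channels and SSID-less requests skip the probe step and just listen, while active channels honour a short probe delay, send the probe requests, then listen for the channel time.

    #include <stdio.h>

    enum scan_state { SCAN_SET_CHANNEL, SCAN_SEND_PROBE };

    #define PROBE_DELAY            3    /* ticks, illustrative */
    #define CHANNEL_TIME          30
    #define PASSIVE_CHANNEL_TIME  60

    static int scan_step(enum scan_state *state, int chan_is_passive, int n_ssids)
    {
        switch (*state) {
        case SCAN_SET_CHANNEL:
            /* tune to the next channel here */
            if (chan_is_passive || n_ssids == 0)
                return PASSIVE_CHANNEL_TIME;  /* listen only, stay in this state */
            *state = SCAN_SEND_PROBE;
            return PROBE_DELAY;               /* honour the probe delay (NAV) first */
        case SCAN_SEND_PROBE:
            /* send one probe request per requested SSID here */
            *state = SCAN_SET_CHANNEL;
            return CHANNEL_TIME;              /* wait for probe responses */
        }
        return 0;
    }

    int main(void)
    {
        enum scan_state st = SCAN_SET_CHANNEL;
        int d1 = scan_step(&st, 0, 1);        /* active channel: probe delay */
        int d2 = scan_step(&st, 0, 1);        /* then the probe/response dwell */

        printf("active channel: wait %d then %d ticks\n", d1, d2);
        printf("passive channel: wait %d ticks\n", scan_step(&st, 1, 1));
        return 0;
    }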
421 602int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
422int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, 603 struct cfg80211_scan_request *req)
423 struct cfg80211_scan_request *req)
424{ 604{
425 struct ieee80211_local *local = scan_sdata->local; 605 int res;
426 struct ieee80211_sub_if_data *sdata;
427
428 if (!req)
429 return -EINVAL;
430
431 if (local->scan_req && local->scan_req != req)
432 return -EBUSY;
433
434 local->scan_req = req;
435
436 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1)
437 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
438 * BSSID: MACAddress
439 * SSID
440 * ScanType: ACTIVE, PASSIVE
441 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
442 * a Probe frame during active scanning
443 * ChannelList
444 * MinChannelTime (>= ProbeDelay), in TU
445 * MaxChannelTime: (>= MinChannelTime), in TU
446 */
447
448 /* MLME-SCAN.confirm
449 * BSSDescriptionSet
450 * ResultCode: SUCCESS, INVALID_PARAMETERS
451 */
452
453 if (local->sw_scanning || local->hw_scanning) {
454 if (local->scan_sdata == scan_sdata)
455 return 0;
456 return -EBUSY;
457 }
458
459 if (local->ops->hw_scan) {
460 int rc;
461
462 local->hw_scanning = true;
463 rc = local->ops->hw_scan(local_to_hw(local), req);
464 if (rc) {
465 local->hw_scanning = false;
466 return rc;
467 }
468 local->scan_sdata = scan_sdata;
469 return 0;
470 }
471
472 /*
473 * Hardware/driver doesn't support hw_scan, so use software
474 * scanning instead. First send a nullfunc frame with power save
475 * bit on so that AP will buffer the frames for us while we are not
476 * listening, then send probe requests to each channel and wait for
477 * the responses. After all channels are scanned, tune back to the
478 * original channel and send a nullfunc frame with power save bit
479 * off to trigger the AP to send us all the buffered frames.
480 *
481 * Note that while local->sw_scanning is true everything else but
482 * nullfunc frames and probe requests will be dropped in
483 * ieee80211_tx_h_check_assoc().
484 */
485 local->sw_scanning = true;
486 if (local->ops->sw_scan_start)
487 local->ops->sw_scan_start(local_to_hw(local));
488 606
489 mutex_lock(&local->iflist_mtx); 607 mutex_lock(&sdata->local->scan_mtx);
490 list_for_each_entry(sdata, &local->interfaces, list) { 608 res = __ieee80211_start_scan(sdata, req);
491 if (!netif_running(sdata->dev)) 609 mutex_unlock(&sdata->local->scan_mtx);
492 continue;
493 610
494 /* disable beaconing */ 611 return res;
495 if (sdata->vif.type == NL80211_IFTYPE_AP || 612}
496 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
497 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
498 ieee80211_if_config(sdata,
499 IEEE80211_IFCC_BEACON_ENABLED);
500 613
501 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 614int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
502 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 615 const u8 *ssid, u8 ssid_len)
503 netif_tx_stop_all_queues(sdata->dev); 616{
504 ieee80211_scan_ps_enable(sdata); 617 struct ieee80211_local *local = sdata->local;
505 } 618 int ret = -EBUSY;
506 } else
507 netif_tx_stop_all_queues(sdata->dev);
508 }
509 mutex_unlock(&local->iflist_mtx);
510 619
511 local->scan_state = SCAN_SET_CHANNEL; 620 mutex_lock(&local->scan_mtx);
512 local->scan_channel_idx = 0;
513 local->scan_sdata = scan_sdata;
514 local->scan_req = req;
515 621
516 netif_addr_lock_bh(local->mdev); 622 /* busy scanning */
517 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; 623 if (local->scan_req)
518 local->ops->configure_filter(local_to_hw(local), 624 goto unlock;
519 FIF_BCN_PRBRESP_PROMISC,
520 &local->filter_flags,
521 local->mdev->mc_count,
522 local->mdev->mc_list);
523 netif_addr_unlock_bh(local->mdev);
524 625
525 /* TODO: start scan as soon as all nullfunc frames are ACKed */ 626 memcpy(local->int_scan_req.ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
526 queue_delayed_work(local->hw.workqueue, &local->scan_work, 627 local->int_scan_req.ssids[0].ssid_len = ssid_len;
527 IEEE80211_CHANNEL_TIME);
528 628
529 return 0; 629 ret = __ieee80211_start_scan(sdata, &sdata->local->int_scan_req);
630 unlock:
631 mutex_unlock(&local->scan_mtx);
632 return ret;
530} 633}
531 634
532 635void ieee80211_scan_cancel(struct ieee80211_local *local)
533int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
534 struct cfg80211_scan_request *req)
535{ 636{
536 struct ieee80211_local *local = sdata->local; 637 bool swscan;
537 struct ieee80211_if_managed *ifmgd;
538
539 if (!req)
540 return -EINVAL;
541 638
542 if (local->scan_req && local->scan_req != req) 639 cancel_delayed_work_sync(&local->scan_work);
543 return -EBUSY;
544
545 local->scan_req = req;
546
547 if (sdata->vif.type != NL80211_IFTYPE_STATION)
548 return ieee80211_start_scan(sdata, req);
549 640
550 /* 641 /*
551 * STA has a state machine that might need to defer scanning 642 * Only call this function when a scan can't be
552 * while it's trying to associate/authenticate, therefore we 643 * queued -- mostly at suspend under RTNL.
553 * queue it up to the state machine in that case.
554 */ 644 */
645 mutex_lock(&local->scan_mtx);
646 swscan = local->sw_scanning;
647 mutex_unlock(&local->scan_mtx);
555 648
556 if (local->sw_scanning || local->hw_scanning) { 649 if (swscan)
557 if (local->scan_sdata == sdata) 650 ieee80211_scan_completed(&local->hw, true);
558 return 0;
559 return -EBUSY;
560 }
561
562 ifmgd = &sdata->u.mgd;
563 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
564 queue_work(local->hw.workqueue, &ifmgd->work);
565
566 return 0;
567} 651}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 5f7a2624ed74..68953033403d 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -15,7 +15,7 @@
15 */ 15 */
16 16
17#include <linux/ieee80211.h> 17#include <linux/ieee80211.h>
18#include <net/wireless.h> 18#include <net/cfg80211.h>
19#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include "ieee80211_i.h" 20#include "ieee80211_i.h"
21#include "sta_info.h" 21#include "sta_info.h"
@@ -84,104 +84,3 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
84 mgmt->sa, mgmt->bssid, 84 mgmt->sa, mgmt->bssid,
85 mgmt->u.action.u.measurement.dialog_token); 85 mgmt->u.action.u.measurement.dialog_token);
86} 86}
87
88void ieee80211_chswitch_work(struct work_struct *work)
89{
90 struct ieee80211_sub_if_data *sdata =
91 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
92 struct ieee80211_bss *bss;
93 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
94
95 if (!netif_running(sdata->dev))
96 return;
97
98 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
99 sdata->local->hw.conf.channel->center_freq,
100 ifmgd->ssid, ifmgd->ssid_len);
101 if (!bss)
102 goto exit;
103
104 sdata->local->oper_channel = sdata->local->csa_channel;
105 /* XXX: shouldn't really modify cfg80211-owned data! */
106 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
107 bss->cbss.channel = sdata->local->oper_channel;
108
109 ieee80211_rx_bss_put(sdata->local, bss);
110exit:
111 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
112 ieee80211_wake_queues_by_reason(&sdata->local->hw,
113 IEEE80211_QUEUE_STOP_REASON_CSA);
114}
115
116void ieee80211_chswitch_timer(unsigned long data)
117{
118 struct ieee80211_sub_if_data *sdata =
119 (struct ieee80211_sub_if_data *) data;
120 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
121
122 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
123}
124
125void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
126 struct ieee80211_channel_sw_ie *sw_elem,
127 struct ieee80211_bss *bss)
128{
129 struct ieee80211_channel *new_ch;
130 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
131 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
132
133 /* FIXME: Handle ADHOC later */
134 if (sdata->vif.type != NL80211_IFTYPE_STATION)
135 return;
136
137 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
138 return;
139
140 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
141 return;
142
143 /* Disregard subsequent beacons if we are already running a timer
144 processing a CSA */
145
146 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
147 return;
148
149 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
150 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
151 return;
152
153 sdata->local->csa_channel = new_ch;
154
155 if (sw_elem->count <= 1) {
156 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
157 } else {
158 ieee80211_stop_queues_by_reason(&sdata->local->hw,
159 IEEE80211_QUEUE_STOP_REASON_CSA);
160 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
161 mod_timer(&ifmgd->chswitch_timer,
162 jiffies +
163 msecs_to_jiffies(sw_elem->count *
164 bss->cbss.beacon_interval));
165 }
166}
167
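For reference, the switch timing encoded by the channel-switch handler removed above: a CSA count of zero or one means switch immediately, otherwise transmission is stopped and the switch is scheduled roughly count beacon intervals ahead. The code above simply feeds count * beacon_interval (in TUs) to msecs_to_jiffies(); the sketch below shows the exact TU-to-millisecond conversion for comparison.

    #include <stdio.h>

    static unsigned int csa_delay_ms(unsigned int csa_count,
                                     unsigned int beacon_int_tu)
    {
        if (csa_count <= 1)
            return 0;                            /* switch immediately */
        /* 1 TU = 1024 microseconds */
        return csa_count * beacon_int_tu * 1024 / 1000;
    }

    int main(void)
    {
        /* e.g. count=5 on a 100 TU beacon interval: switch in ~512 ms */
        printf("delay = %u ms\n", csa_delay_ms(5, 100));
        return 0;
    }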
168void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
169 u16 capab_info, u8 *pwr_constr_elem,
170 u8 pwr_constr_elem_len)
171{
172 struct ieee80211_conf *conf = &sdata->local->hw.conf;
173
174 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
175 return;
176
177 /* Power constraint IE length should be 1 octet */
178 if (pwr_constr_elem_len != 1)
179 return;
180
181 if ((*pwr_constr_elem <= conf->channel->max_power) &&
182 (*pwr_constr_elem != sdata->local->power_constr_level)) {
183 sdata->local->power_constr_level = *pwr_constr_elem;
184 ieee80211_hw_config(sdata->local, 0);
185 }
186}
187
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c5f14e6bbde2..d5611d8fd0d6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -19,6 +19,7 @@
19 19
20#include <net/mac80211.h> 20#include <net/mac80211.h>
21#include "ieee80211_i.h" 21#include "ieee80211_i.h"
22#include "driver-ops.h"
22#include "rate.h" 23#include "rate.h"
23#include "sta_info.h" 24#include "sta_info.h"
24#include "debugfs_sta.h" 25#include "debugfs_sta.h"
@@ -292,6 +293,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
292 skb_queue_head_init(&sta->ps_tx_buf); 293 skb_queue_head_init(&sta->ps_tx_buf);
293 skb_queue_head_init(&sta->tx_filtered); 294 skb_queue_head_init(&sta->tx_filtered);
294 295
296 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
297 sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
298
295#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
296 printk(KERN_DEBUG "%s: Allocated STA %pM\n", 300 printk(KERN_DEBUG "%s: Allocated STA %pM\n",
297 wiphy_name(local->hw.wiphy), sta->sta.addr); 301 wiphy_name(local->hw.wiphy), sta->sta.addr);
@@ -346,8 +350,7 @@ int sta_info_insert(struct sta_info *sta)
346 struct ieee80211_sub_if_data, 350 struct ieee80211_sub_if_data,
347 u.ap); 351 u.ap);
348 352
349 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 353 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta);
350 STA_NOTIFY_ADD, &sta->sta);
351 } 354 }
352 355
353#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 356#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -405,8 +408,7 @@ static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
405 408
406 if (sta->local->ops->set_tim) { 409 if (sta->local->ops->set_tim) {
407 sta->local->tim_in_locked_section = true; 410 sta->local->tim_in_locked_section = true;
408 sta->local->ops->set_tim(local_to_hw(sta->local), 411 drv_set_tim(sta->local, &sta->sta, true);
409 &sta->sta, true);
410 sta->local->tim_in_locked_section = false; 412 sta->local->tim_in_locked_section = false;
411 } 413 }
412} 414}
@@ -431,8 +433,7 @@ static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
431 433
432 if (sta->local->ops->set_tim) { 434 if (sta->local->ops->set_tim) {
433 sta->local->tim_in_locked_section = true; 435 sta->local->tim_in_locked_section = true;
434 sta->local->ops->set_tim(local_to_hw(sta->local), 436 drv_set_tim(sta->local, &sta->sta, false);
435 &sta->sta, false);
436 sta->local->tim_in_locked_section = false; 437 sta->local->tim_in_locked_section = false;
437 } 438 }
438} 439}
@@ -482,8 +483,8 @@ static void __sta_info_unlink(struct sta_info **sta)
482 struct ieee80211_sub_if_data, 483 struct ieee80211_sub_if_data,
483 u.ap); 484 u.ap);
484 485
485 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 486 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
486 STA_NOTIFY_REMOVE, &(*sta)->sta); 487 &(*sta)->sta);
487 } 488 }
488 489
489 if (ieee80211_vif_is_mesh(&sdata->vif)) { 490 if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -543,9 +544,8 @@ void sta_info_unlink(struct sta_info **sta)
543 spin_unlock_irqrestore(&local->sta_lock, flags); 544 spin_unlock_irqrestore(&local->sta_lock, flags);
544} 545}
545 546
546static inline int sta_info_buffer_expired(struct ieee80211_local *local, 547static int sta_info_buffer_expired(struct sta_info *sta,
547 struct sta_info *sta, 548 struct sk_buff *skb)
548 struct sk_buff *skb)
549{ 549{
550 struct ieee80211_tx_info *info; 550 struct ieee80211_tx_info *info;
551 int timeout; 551 int timeout;
@@ -556,8 +556,9 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local,
556 info = IEEE80211_SKB_CB(skb); 556 info = IEEE80211_SKB_CB(skb);
557 557
558 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 558 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
559 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / 559 timeout = (sta->listen_interval *
560 15625) * HZ; 560 sta->sdata->vif.bss_conf.beacon_int *
561 32 / 15625) * HZ;
561 if (timeout < STA_TX_BUFFER_EXPIRE) 562 if (timeout < STA_TX_BUFFER_EXPIRE)
562 timeout = STA_TX_BUFFER_EXPIRE; 563 timeout = STA_TX_BUFFER_EXPIRE;
563 return time_after(jiffies, info->control.jiffies + timeout); 564 return time_after(jiffies, info->control.jiffies + timeout);
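The constants in sta_info_buffer_expired() are just the comment's formula folded into integer arithmetic: 2 * 1024 / 1000000 reduces to 32 / 15625. A quick standalone cross-check (the kernel then scales the result by HZ and enforces the STA_TX_BUFFER_EXPIRE floor):

    #include <stdio.h>

    int main(void)
    {
        unsigned int listen_interval = 5;      /* beacons the STA may sleep through */
        unsigned int beacon_int = 100;         /* TU (1 TU = 1024 us) */

        /* comment form: 2 * listen_interval * beacon_int * 1024 / 1e6 seconds */
        double seconds = 2.0 * listen_interval * beacon_int * 1024 / 1000000;
        /* integer form used in the code above */
        unsigned int scaled = listen_interval * beacon_int * 32 / 15625;

        printf("exact: %.3f s, integer form: %u s\n", seconds, scaled);
        /* prints: exact: 1.024 s, integer form: 1 s */
        return 0;
    }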
@@ -577,7 +578,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
577 for (;;) { 578 for (;;) {
578 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 579 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
579 skb = skb_peek(&sta->ps_tx_buf); 580 skb = skb_peek(&sta->ps_tx_buf);
580 if (sta_info_buffer_expired(local, sta, skb)) 581 if (sta_info_buffer_expired(sta, skb))
581 skb = __skb_dequeue(&sta->ps_tx_buf); 582 skb = __skb_dequeue(&sta->ps_tx_buf);
582 else 583 else
583 skb = NULL; 584 skb = NULL;
@@ -610,6 +611,9 @@ static void sta_info_cleanup(unsigned long data)
610 sta_info_cleanup_expire_buffered(local, sta); 611 sta_info_cleanup_expire_buffered(local, sta);
611 rcu_read_unlock(); 612 rcu_read_unlock();
612 613
614 if (local->quiescing)
615 return;
616
613 local->sta_cleanup.expires = 617 local->sta_cleanup.expires =
614 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 618 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
615 add_timer(&local->sta_cleanup); 619 add_timer(&local->sta_cleanup);
@@ -686,41 +690,10 @@ static void sta_info_debugfs_add_work(struct work_struct *work)
686} 690}
687#endif 691#endif
688 692
689static void __ieee80211_run_pending_flush(struct ieee80211_local *local)
690{
691 struct sta_info *sta;
692 unsigned long flags;
693
694 ASSERT_RTNL();
695
696 spin_lock_irqsave(&local->sta_lock, flags);
697 while (!list_empty(&local->sta_flush_list)) {
698 sta = list_first_entry(&local->sta_flush_list,
699 struct sta_info, list);
700 list_del(&sta->list);
701 spin_unlock_irqrestore(&local->sta_lock, flags);
702 sta_info_destroy(sta);
703 spin_lock_irqsave(&local->sta_lock, flags);
704 }
705 spin_unlock_irqrestore(&local->sta_lock, flags);
706}
707
708static void ieee80211_sta_flush_work(struct work_struct *work)
709{
710 struct ieee80211_local *local =
711 container_of(work, struct ieee80211_local, sta_flush_work);
712
713 rtnl_lock();
714 __ieee80211_run_pending_flush(local);
715 rtnl_unlock();
716}
717
718void sta_info_init(struct ieee80211_local *local) 693void sta_info_init(struct ieee80211_local *local)
719{ 694{
720 spin_lock_init(&local->sta_lock); 695 spin_lock_init(&local->sta_lock);
721 INIT_LIST_HEAD(&local->sta_list); 696 INIT_LIST_HEAD(&local->sta_list);
722 INIT_LIST_HEAD(&local->sta_flush_list);
723 INIT_WORK(&local->sta_flush_work, ieee80211_sta_flush_work);
724 697
725 setup_timer(&local->sta_cleanup, sta_info_cleanup, 698 setup_timer(&local->sta_cleanup, sta_info_cleanup,
726 (unsigned long)local); 699 (unsigned long)local);
@@ -741,7 +714,6 @@ int sta_info_start(struct ieee80211_local *local)
741void sta_info_stop(struct ieee80211_local *local) 714void sta_info_stop(struct ieee80211_local *local)
742{ 715{
743 del_timer(&local->sta_cleanup); 716 del_timer(&local->sta_cleanup);
744 cancel_work_sync(&local->sta_flush_work);
745#ifdef CONFIG_MAC80211_DEBUGFS 717#ifdef CONFIG_MAC80211_DEBUGFS
746 /* 718 /*
747 * Make sure the debugfs adding work isn't pending after this 719 * Make sure the debugfs adding work isn't pending after this
@@ -752,10 +724,7 @@ void sta_info_stop(struct ieee80211_local *local)
752 cancel_work_sync(&local->sta_debugfs_add); 724 cancel_work_sync(&local->sta_debugfs_add);
753#endif 725#endif
754 726
755 rtnl_lock();
756 sta_info_flush(local, NULL); 727 sta_info_flush(local, NULL);
757 __ieee80211_run_pending_flush(local);
758 rtnl_unlock();
759} 728}
760 729
761/** 730/**
@@ -767,7 +736,7 @@ void sta_info_stop(struct ieee80211_local *local)
767 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs 736 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs
768 */ 737 */
769int sta_info_flush(struct ieee80211_local *local, 738int sta_info_flush(struct ieee80211_local *local,
770 struct ieee80211_sub_if_data *sdata) 739 struct ieee80211_sub_if_data *sdata)
771{ 740{
772 struct sta_info *sta, *tmp; 741 struct sta_info *sta, *tmp;
773 LIST_HEAD(tmp_list); 742 LIST_HEAD(tmp_list);
@@ -775,7 +744,6 @@ int sta_info_flush(struct ieee80211_local *local,
775 unsigned long flags; 744 unsigned long flags;
776 745
777 might_sleep(); 746 might_sleep();
778 ASSERT_RTNL();
779 747
780 spin_lock_irqsave(&local->sta_lock, flags); 748 spin_lock_irqsave(&local->sta_lock, flags);
781 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 749 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
@@ -795,39 +763,6 @@ int sta_info_flush(struct ieee80211_local *local,
795 return ret; 763 return ret;
796} 764}
797 765
798/**
799 * sta_info_flush_delayed - flush matching STA entries from the STA table
800 *
801 * This function unlinks all stations for a given interface and queues
802 * them for freeing. Note that the workqueue function scheduled here has
803 * to run before any new keys can be added to the system to avoid set_key()
804 * callback ordering issues.
805 *
806 * @sdata: the interface
807 */
808void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
809{
810 struct ieee80211_local *local = sdata->local;
811 struct sta_info *sta, *tmp;
812 unsigned long flags;
813 bool work = false;
814
815 spin_lock_irqsave(&local->sta_lock, flags);
816 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
817 if (sdata == sta->sdata) {
818 __sta_info_unlink(&sta);
819 if (sta) {
820 list_add_tail(&sta->list,
821 &local->sta_flush_list);
822 work = true;
823 }
824 }
825 }
826 if (work)
827 schedule_work(&local->sta_flush_work);
828 spin_unlock_irqrestore(&local->sta_lock, flags);
829}
830
831void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 766void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
832 unsigned long exp_time) 767 unsigned long exp_time)
833{ 768{
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5534d489f506..49a1a1f76511 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -88,6 +88,7 @@ struct tid_ampdu_tx {
88 * struct tid_ampdu_rx - TID aggregation information (Rx). 88 * struct tid_ampdu_rx - TID aggregation information (Rx).
89 * 89 *
90 * @reorder_buf: buffer to reorder incoming aggregated MPDUs 90 * @reorder_buf: buffer to reorder incoming aggregated MPDUs
91 * @reorder_time: jiffies when skb was added
91 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) 92 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
92 * @head_seq_num: head sequence number in reordering buffer. 93 * @head_seq_num: head sequence number in reordering buffer.
93 * @stored_mpdu_num: number of MPDUs in reordering buffer 94 * @stored_mpdu_num: number of MPDUs in reordering buffer
@@ -99,6 +100,7 @@ struct tid_ampdu_tx {
99 */ 100 */
100struct tid_ampdu_rx { 101struct tid_ampdu_rx {
101 struct sk_buff **reorder_buf; 102 struct sk_buff **reorder_buf;
103 unsigned long *reorder_time;
102 struct timer_list session_timer; 104 struct timer_list session_timer;
103 u16 head_seq_num; 105 u16 head_seq_num;
104 u16 stored_mpdu_num; 106 u16 stored_mpdu_num;
@@ -214,6 +216,7 @@ struct sta_ampdu_mlme {
214 * @plink_state: peer link state 216 * @plink_state: peer link state
215 * @plink_timeout: timeout of peer link 217 * @plink_timeout: timeout of peer link
216 * @plink_timer: peer link watch timer 218 * @plink_timer: peer link watch timer
219 * @plink_timer_was_running: used by suspend/resume to restore timers
217 * @debugfs: debug filesystem info 220 * @debugfs: debug filesystem info
218 * @sta: station information we share with the driver 221 * @sta: station information we share with the driver
219 */ 222 */
@@ -291,6 +294,7 @@ struct sta_info {
291 __le16 reason; 294 __le16 reason;
292 u8 plink_retries; 295 u8 plink_retries;
293 bool ignore_plink_timer; 296 bool ignore_plink_timer;
297 bool plink_timer_was_running;
294 enum plink_state plink_state; 298 enum plink_state plink_state;
295 u32 plink_timeout; 299 u32 plink_timeout;
296 struct timer_list plink_timer; 300 struct timer_list plink_timer;
@@ -442,8 +446,7 @@ void sta_info_init(struct ieee80211_local *local);
442int sta_info_start(struct ieee80211_local *local); 446int sta_info_start(struct ieee80211_local *local);
443void sta_info_stop(struct ieee80211_local *local); 447void sta_info_stop(struct ieee80211_local *local);
444int sta_info_flush(struct ieee80211_local *local, 448int sta_info_flush(struct ieee80211_local *local,
445 struct ieee80211_sub_if_data *sdata); 449 struct ieee80211_sub_if_data *sdata);
446void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
447void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 450void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
448 unsigned long exp_time); 451 unsigned long exp_time);
449 452
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 38fa111d2dc6..964b7faa7f17 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -13,6 +13,7 @@
13#include <asm/unaligned.h> 13#include <asm/unaligned.h>
14 14
15#include <net/mac80211.h> 15#include <net/mac80211.h>
16#include "driver-ops.h"
16#include "key.h" 17#include "key.h"
17#include "tkip.h" 18#include "tkip.h"
18#include "wep.h" 19#include "wep.h"
@@ -307,9 +308,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
307 if (is_multicast_ether_addr(ra)) 308 if (is_multicast_ether_addr(ra))
308 sta_addr = bcast; 309 sta_addr = bcast;
309 310
310 key->local->ops->update_tkip_key( 311 drv_update_tkip_key(key->local, &key->conf, sta_addr,
311 local_to_hw(key->local), &key->conf, 312 iv32, key->u.tkip.rx[queue].p1k);
312 sta_addr, iv32, key->u.tkip.rx[queue].p1k);
313 } 313 }
314 } 314 }
315 315
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 63656266d567..a910148b8228 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -25,6 +25,7 @@
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26 26
27#include "ieee80211_i.h" 27#include "ieee80211_i.h"
28#include "driver-ops.h"
28#include "led.h" 29#include "led.h"
29#include "mesh.h" 30#include "mesh.h"
30#include "wep.h" 31#include "wep.h"
@@ -409,8 +410,24 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
409 sta->sta.addr); 410 sta->sta.addr);
410 } 411 }
411#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 412#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
412 clear_sta_flags(sta, WLAN_STA_PSPOLL); 413 if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
414 /*
 415 * The sleeping station with pending data is now snoozing.
 416 * It queried us for its buffered frames and will go back
 417 * to deep sleep once it has received everything.
 418 *
 419 * Inform the driver, in case the hardware does powersave
 420 * frame filtering and keeps a station blacklist on its own
 421 * (e.g. p54), so that frames can be delivered unimpeded.
 422 *
 423 * Note: it should be safe to disable the filter now.
 424 * As it is really unlikely that we still have any pending
 425 * frame for this station left in the hw's buffers/fifos
 426 * that has not already been rejected with an unsuccessful tx_status.
427 */
413 428
429 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
430 }
414 return TX_CONTINUE; 431 return TX_CONTINUE;
415} 432}
416 433
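The test_and_clear_sta_flags() change above guarantees that exactly one of the frames delivered after a PS-poll carries the IEEE80211_TX_CTL_CLEAR_PS_FILT hint to the driver. A small standalone sketch of that handshake using C11 atomics; the flag names here are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    #define STA_FLAG_PSPOLL        (1u << 0)   /* set when the dozing STA polls us */
    #define TX_CTL_CLEAR_PS_FILT   (1u << 1)   /* "driver, drop your PS filter" hint */

    static unsigned int tx_flags_for(atomic_uint *sta_flags)
    {
        unsigned int tx_flags = 0;

        /* atomic test-and-clear: only the caller that actually saw the bit
         * set gets to attach the hint */
        if (atomic_fetch_and(sta_flags, ~STA_FLAG_PSPOLL) & STA_FLAG_PSPOLL)
            tx_flags |= TX_CTL_CLEAR_PS_FILT;

        return tx_flags;
    }

    int main(void)
    {
        atomic_uint flags = STA_FLAG_PSPOLL;

        printf("first frame:  0x%x\n", tx_flags_for(&flags));  /* carries the hint */
        printf("second frame: 0x%x\n", tx_flags_for(&flags));  /* does not */
        return 0;
    }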
@@ -429,7 +446,7 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
429static ieee80211_tx_result debug_noinline 446static ieee80211_tx_result debug_noinline
430ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 447ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
431{ 448{
432 struct ieee80211_key *key; 449 struct ieee80211_key *key = NULL;
433 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 450 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
434 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 451 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
435 452
@@ -500,7 +517,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
500 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 517 sband = tx->local->hw.wiphy->bands[tx->channel->band];
501 518
502 len = min_t(int, tx->skb->len + FCS_LEN, 519 len = min_t(int, tx->skb->len + FCS_LEN,
503 tx->local->fragmentation_threshold); 520 tx->local->hw.wiphy->frag_threshold);
504 521
505 /* set up the tx rate control struct we give the RC algo */ 522 /* set up the tx rate control struct we give the RC algo */
506 txrc.hw = local_to_hw(tx->local); 523 txrc.hw = local_to_hw(tx->local);
@@ -511,8 +528,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
511 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 528 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx;
512 529
513 /* set up RTS protection if desired */ 530 /* set up RTS protection if desired */
514 if (tx->local->rts_threshold < IEEE80211_MAX_RTS_THRESHOLD && 531 if (len > tx->local->hw.wiphy->rts_threshold) {
515 len > tx->local->rts_threshold) {
516 txrc.rts = rts = true; 532 txrc.rts = rts = true;
517 } 533 }
518 534
@@ -542,6 +558,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
542 if (unlikely(!info->control.rates[0].count)) 558 if (unlikely(!info->control.rates[0].count))
543 info->control.rates[0].count = 1; 559 info->control.rates[0].count = 1;
544 560
561 if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
562 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
563 info->control.rates[0].count = 1;
564
545 if (is_multicast_ether_addr(hdr->addr1)) { 565 if (is_multicast_ether_addr(hdr->addr1)) {
546 /* 566 /*
547 * XXX: verify the rate is in the basic rateset 567 * XXX: verify the rate is in the basic rateset
@@ -754,7 +774,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
754 struct sk_buff *skb = tx->skb; 774 struct sk_buff *skb = tx->skb;
755 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 775 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
756 struct ieee80211_hdr *hdr = (void *)skb->data; 776 struct ieee80211_hdr *hdr = (void *)skb->data;
757 int frag_threshold = tx->local->fragmentation_threshold; 777 int frag_threshold = tx->local->hw.wiphy->frag_threshold;
758 int hdrlen; 778 int hdrlen;
759 int fragnum; 779 int fragnum;
760 780
@@ -852,6 +872,8 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
852 872
853 do { 873 do {
854 hdr = (void *) skb->data; 874 hdr = (void *) skb->data;
875 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
876 break; /* must not overwrite AID */
855 next_len = skb->next ? skb->next->len : 0; 877 next_len = skb->next ? skb->next->len : 0;
856 group_addr = is_multicast_ether_addr(hdr->addr1); 878 group_addr = is_multicast_ether_addr(hdr->addr1);
857 879
@@ -1067,12 +1089,15 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1067 info->flags |= IEEE80211_TX_CTL_NO_ACK; 1089 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1068 } else { 1090 } else {
1069 tx->flags |= IEEE80211_TX_UNICAST; 1091 tx->flags |= IEEE80211_TX_UNICAST;
1070 info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 1092 if (unlikely(local->wifi_wme_noack_test))
1093 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1094 else
1095 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1071 } 1096 }
1072 1097
1073 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 1098 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1074 if ((tx->flags & IEEE80211_TX_UNICAST) && 1099 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1075 skb->len + FCS_LEN > local->fragmentation_threshold && 1100 skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
1076 !(info->flags & IEEE80211_TX_CTL_AMPDU)) 1101 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1077 tx->flags |= IEEE80211_TX_FRAGMENTED; 1102 tx->flags |= IEEE80211_TX_FRAGMENTED;
1078 else 1103 else
@@ -1147,7 +1172,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1147 1172
1148 next = skb->next; 1173 next = skb->next;
1149 len = skb->len; 1174 len = skb->len;
1150 ret = local->ops->tx(local_to_hw(local), skb); 1175 ret = drv_tx(local, skb);
1151 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { 1176 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1152 dev_kfree_skb(skb); 1177 dev_kfree_skb(skb);
1153 ret = NETDEV_TX_OK; 1178 ret = NETDEV_TX_OK;
@@ -2086,18 +2111,18 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2086 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 2111 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2087 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 2112 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2088 struct ieee80211_hdr *hdr; 2113 struct ieee80211_hdr *hdr;
2114 struct sk_buff *presp = rcu_dereference(ifibss->presp);
2089 2115
2090 if (!ifibss->probe_resp) 2116 if (!presp)
2091 goto out; 2117 goto out;
2092 2118
2093 skb = skb_copy(ifibss->probe_resp, GFP_ATOMIC); 2119 skb = skb_copy(presp, GFP_ATOMIC);
2094 if (!skb) 2120 if (!skb)
2095 goto out; 2121 goto out;
2096 2122
2097 hdr = (struct ieee80211_hdr *) skb->data; 2123 hdr = (struct ieee80211_hdr *) skb->data;
2098 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2124 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2099 IEEE80211_STYPE_BEACON); 2125 IEEE80211_STYPE_BEACON);
2100
2101 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 2126 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2102 struct ieee80211_mgmt *mgmt; 2127 struct ieee80211_mgmt *mgmt;
2103 u8 *pos; 2128 u8 *pos;
@@ -2117,7 +2142,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2117 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2142 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2118 /* BSSID is left zeroed, wildcard value */ 2143 /* BSSID is left zeroed, wildcard value */
2119 mgmt->u.beacon.beacon_int = 2144 mgmt->u.beacon.beacon_int =
2120 cpu_to_le16(local->hw.conf.beacon_int); 2145 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2121 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2146 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
2122 2147
2123 pos = skb_put(skb, 2); 2148 pos = skb_put(skb, 2);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index fdf432f14554..949d857debd8 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -20,27 +20,21 @@
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <linux/bitmap.h> 22#include <linux/bitmap.h>
23#include <linux/crc32.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/cfg80211.h> 25#include <net/cfg80211.h>
25#include <net/rtnetlink.h> 26#include <net/rtnetlink.h>
26 27
27#include "ieee80211_i.h" 28#include "ieee80211_i.h"
29#include "driver-ops.h"
28#include "rate.h" 30#include "rate.h"
29#include "mesh.h" 31#include "mesh.h"
30#include "wme.h" 32#include "wme.h"
33#include "led.h"
31 34
32/* privid for wiphys to determine whether they belong to us or not */ 35/* privid for wiphys to determine whether they belong to us or not */
33void *mac80211_wiphy_privid = &mac80211_wiphy_privid; 36void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
34 37
35/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
36/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
37const unsigned char rfc1042_header[] __aligned(2) =
38 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
39
40/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
41const unsigned char bridge_tunnel_header[] __aligned(2) =
42 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
43
44struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) 38struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
45{ 39{
46 struct ieee80211_local *local; 40 struct ieee80211_local *local;
@@ -100,70 +94,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
100 return NULL; 94 return NULL;
101} 95}
102 96
103unsigned int ieee80211_hdrlen(__le16 fc)
104{
105 unsigned int hdrlen = 24;
106
107 if (ieee80211_is_data(fc)) {
108 if (ieee80211_has_a4(fc))
109 hdrlen = 30;
110 if (ieee80211_is_data_qos(fc))
111 hdrlen += IEEE80211_QOS_CTL_LEN;
112 goto out;
113 }
114
115 if (ieee80211_is_ctl(fc)) {
116 /*
117 * ACK and CTS are 10 bytes, all others 16. To see how
118 * to get this condition consider
119 * subtype mask: 0b0000000011110000 (0x00F0)
120 * ACK subtype: 0b0000000011010000 (0x00D0)
121 * CTS subtype: 0b0000000011000000 (0x00C0)
122 * bits that matter: ^^^ (0x00E0)
123 * value of those: 0b0000000011000000 (0x00C0)
124 */
125 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
126 hdrlen = 10;
127 else
128 hdrlen = 16;
129 }
130out:
131 return hdrlen;
132}
133EXPORT_SYMBOL(ieee80211_hdrlen);
134
135unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
136{
137 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data;
138 unsigned int hdrlen;
139
140 if (unlikely(skb->len < 10))
141 return 0;
142 hdrlen = ieee80211_hdrlen(hdr->frame_control);
143 if (unlikely(hdrlen > skb->len))
144 return 0;
145 return hdrlen;
146}
147EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
148
149int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
150{
151 int ae = meshhdr->flags & IEEE80211S_FLAGS_AE;
152 /* 7.1.3.5a.2 */
153 switch (ae) {
154 case 0:
155 return 6;
156 case 1:
157 return 12;
158 case 2:
159 return 18;
160 case 3:
161 return 24;
162 default:
163 return 6;
164 }
165}
166
167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 97void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
168{ 98{
169 struct sk_buff *skb = tx->skb; 99 struct sk_buff *skb = tx->skb;
@@ -536,8 +466,16 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
536void ieee802_11_parse_elems(u8 *start, size_t len, 466void ieee802_11_parse_elems(u8 *start, size_t len,
537 struct ieee802_11_elems *elems) 467 struct ieee802_11_elems *elems)
538{ 468{
469 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
470}
471
472u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
473 struct ieee802_11_elems *elems,
474 u64 filter, u32 crc)
475{
539 size_t left = len; 476 size_t left = len;
540 u8 *pos = start; 477 u8 *pos = start;
478 bool calc_crc = filter != 0;
541 479
542 memset(elems, 0, sizeof(*elems)); 480 memset(elems, 0, sizeof(*elems));
543 elems->ie_start = start; 481 elems->ie_start = start;
@@ -551,7 +489,10 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
551 left -= 2; 489 left -= 2;
552 490
553 if (elen > left) 491 if (elen > left)
554 return; 492 break;
493
494 if (calc_crc && id < 64 && (filter & BIT(id)))
495 crc = crc32_be(crc, pos - 2, elen + 2);
555 496
556 switch (id) { 497 switch (id) {
557 case WLAN_EID_SSID: 498 case WLAN_EID_SSID:
@@ -575,8 +516,10 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
575 elems->cf_params_len = elen; 516 elems->cf_params_len = elen;
576 break; 517 break;
577 case WLAN_EID_TIM: 518 case WLAN_EID_TIM:
578 elems->tim = pos; 519 if (elen >= sizeof(struct ieee80211_tim_ie)) {
579 elems->tim_len = elen; 520 elems->tim = (void *)pos;
521 elems->tim_len = elen;
522 }
580 break; 523 break;
581 case WLAN_EID_IBSS_PARAMS: 524 case WLAN_EID_IBSS_PARAMS:
582 elems->ibss_params = pos; 525 elems->ibss_params = pos;
@@ -586,15 +529,20 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
586 elems->challenge = pos; 529 elems->challenge = pos;
587 elems->challenge_len = elen; 530 elems->challenge_len = elen;
588 break; 531 break;
589 case WLAN_EID_WPA: 532 case WLAN_EID_VENDOR_SPECIFIC:
590 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && 533 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
591 pos[2] == 0xf2) { 534 pos[2] == 0xf2) {
592 /* Microsoft OUI (00:50:F2) */ 535 /* Microsoft OUI (00:50:F2) */
536
537 if (calc_crc)
538 crc = crc32_be(crc, pos - 2, elen + 2);
539
593 if (pos[3] == 1) { 540 if (pos[3] == 1) {
594 /* OUI Type 1 - WPA IE */ 541 /* OUI Type 1 - WPA IE */
595 elems->wpa = pos; 542 elems->wpa = pos;
596 elems->wpa_len = elen; 543 elems->wpa_len = elen;
597 } else if (elen >= 5 && pos[3] == 2) { 544 } else if (elen >= 5 && pos[3] == 2) {
545 /* OUI Type 2 - WMM IE */
598 if (pos[4] == 0) { 546 if (pos[4] == 0) {
599 elems->wmm_info = pos; 547 elems->wmm_info = pos;
600 elems->wmm_info_len = elen; 548 elems->wmm_info_len = elen;
@@ -679,32 +627,70 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
679 left -= elen; 627 left -= elen;
680 pos += elen; 628 pos += elen;
681 } 629 }
630
631 return crc;
682} 632}
683 633
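The new filter/crc arguments let a caller (beacon tracking in mlme.c) hash only the element IDs it cares about, so two beacons that differ solely in uninteresting IEs (the TIM, typically) produce the same checksum and trigger no reconfiguration. A self-contained sketch of the idea; the bitwise routine below merely stands in for the kernel's crc32_be():

    #include <stdint.h>
    #include <stdio.h>

    /* Big-endian (non-reflected) CRC-32, polynomial 0x04C11DB7. */
    static uint32_t crc32_be_bits(uint32_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= (uint32_t)*p++ << 24;
            for (int i = 0; i < 8; i++)
                crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04c11db7u : crc << 1;
        }
        return crc;
    }

    /* Walk ID/len/value elements and fold only the selected IDs into the CRC. */
    static uint32_t parse_elems_crc(const uint8_t *pos, size_t left,
                                    uint64_t filter, uint32_t crc)
    {
        while (left >= 2) {
            uint8_t id = pos[0], elen = pos[1];
            if (elen + 2u > left)
                break;                              /* truncated element */
            if (id < 64 && (filter & (1ULL << id)))
                crc = crc32_be_bits(crc, pos, elen + 2u);
            /* ...the real parser records a pointer/length per known ID here... */
            pos += elen + 2u;
            left -= elen + 2u;
        }
        return crc;
    }

    int main(void)
    {
        /* SSID "ap" (ID 0), DS params channel 6 (ID 3), TIM (ID 5) */
        const uint8_t beacon1[] = { 0, 2, 'a', 'p',  3, 1, 6,  5, 4, 0, 1, 0, 0 };
        const uint8_t beacon2[] = { 0, 2, 'a', 'p',  3, 1, 6,  5, 4, 1, 1, 0, 0 };
        uint64_t filter = (1ULL << 0) | (1ULL << 3);   /* track SSID + DS params only */

        uint32_t c1 = parse_elems_crc(beacon1, sizeof(beacon1), filter, 0);
        uint32_t c2 = parse_elems_crc(beacon2, sizeof(beacon2), filter, 0);
        printf("crc1=0x%08x crc2=0x%08x (%s)\n", c1, c2,
               c1 == c2 ? "no relevant change" : "changed");
        return 0;
    }

The two sample beacons differ only in the TIM element, which is outside the filter, so both produce the same checksum.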
684void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 634void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
685{ 635{
686 struct ieee80211_local *local = sdata->local; 636 struct ieee80211_local *local = sdata->local;
687 struct ieee80211_tx_queue_params qparam; 637 struct ieee80211_tx_queue_params qparam;
688 int i; 638 int queue;
639 bool use_11b;
640 int aCWmin, aCWmax;
689 641
690 if (!local->ops->conf_tx) 642 if (!local->ops->conf_tx)
691 return; 643 return;
692 644
693 memset(&qparam, 0, sizeof(qparam)); 645 memset(&qparam, 0, sizeof(qparam));
694 646
695 qparam.aifs = 2; 647 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
648 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
696 649
697 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 650 for (queue = 0; queue < local_to_hw(local)->queues; queue++) {
698 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) 651 /* Set defaults according to 802.11-2007 Table 7-37 */
699 qparam.cw_min = 31; 652 aCWmax = 1023;
700 else 653 if (use_11b)
701 qparam.cw_min = 15; 654 aCWmin = 31;
702 655 else
703 qparam.cw_max = 1023; 656 aCWmin = 15;
704 qparam.txop = 0; 657
658 switch (queue) {
659 case 3: /* AC_BK */
660 qparam.cw_max = aCWmin;
661 qparam.cw_min = aCWmax;
662 qparam.txop = 0;
663 qparam.aifs = 7;
664 break;
665 default: /* never happens but let's not leave undefined */
666 case 2: /* AC_BE */
667 qparam.cw_max = aCWmin;
668 qparam.cw_min = aCWmax;
669 qparam.txop = 0;
670 qparam.aifs = 3;
671 break;
672 case 1: /* AC_VI */
673 qparam.cw_max = aCWmin;
674 qparam.cw_min = (aCWmin + 1) / 2 - 1;
675 if (use_11b)
676 qparam.txop = 6016/32;
677 else
678 qparam.txop = 3008/32;
679 qparam.aifs = 2;
680 break;
681 case 0: /* AC_VO */
682 qparam.cw_max = (aCWmin + 1) / 2 - 1;
683 qparam.cw_min = (aCWmin + 1) / 4 - 1;
684 if (use_11b)
685 qparam.txop = 3264/32;
686 else
687 qparam.txop = 1504/32;
688 qparam.aifs = 2;
689 break;
690 }
705 691
706 for (i = 0; i < local_to_hw(local)->queues; i++) 692 drv_conf_tx(local, queue, &qparam);
707 local->ops->conf_tx(local_to_hw(local), i, &qparam); 693 }
708} 694}
709 695
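For reference, the EDCA defaults of 802.11-2007 Table 7-37 that the per-queue branches above are derived from, for both the OFDM/ERP case (aCWmin = 15) and the 802.11b case (aCWmin = 31): AC_VO uses a quarter and a half of (aCWmin + 1), AC_VI runs from half of (aCWmin + 1) up to aCWmin, and AC_BE/AC_BK run from aCWmin up to aCWmax = 1023 with no TXOP limit. A small sketch that prints the table values (queue numbering follows mac80211, 0 = AC_VO through 3 = AC_BK):

    #include <stdio.h>

    struct edca { int cw_min, cw_max, aifs, txop_32us; };

    static struct edca edca_default(int queue, int use_11b)
    {
        int aCWmin = use_11b ? 31 : 15;
        int aCWmax = 1023;

        switch (queue) {
        case 0: /* AC_VO */
            return (struct edca){ (aCWmin + 1) / 4 - 1, (aCWmin + 1) / 2 - 1,
                                  2, use_11b ? 3264 / 32 : 1504 / 32 };
        case 1: /* AC_VI */
            return (struct edca){ (aCWmin + 1) / 2 - 1, aCWmin,
                                  2, use_11b ? 6016 / 32 : 3008 / 32 };
        case 2: /* AC_BE */
            return (struct edca){ aCWmin, aCWmax, 3, 0 };
        default: /* AC_BK */
            return (struct edca){ aCWmin, aCWmax, 7, 0 };
        }
    }

    int main(void)
    {
        const char *name[] = { "AC_VO", "AC_VI", "AC_BE", "AC_BK" };

        for (int q = 0; q < 4; q++) {
            struct edca e = edca_default(q, 0);   /* OFDM/ERP numbers */
            printf("%s: cw_min=%d cw_max=%d aifs=%d txop=%d*32us\n",
                   name[q], e.cw_min, e.cw_max, e.aifs, e.txop_32us);
        }
        return 0;
    }

Per the table, CWmin is always the smaller value of the pair, and TXOP limits are expressed in 32 us units, so 1504/32 = 47 corresponds to 1.504 ms.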
710void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 696void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -831,16 +817,73 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
831 ieee80211_tx_skb(sdata, skb, encrypt); 817 ieee80211_tx_skb(sdata, skb, encrypt);
832} 818}
833 819
820int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
821 const u8 *ie, size_t ie_len)
822{
823 struct ieee80211_supported_band *sband;
824 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
825 int i;
826
827 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
828
829 pos = buffer;
830
831 *pos++ = WLAN_EID_SUPP_RATES;
832 supp_rates_len = pos;
833 *pos++ = 0;
834
835 for (i = 0; i < sband->n_bitrates; i++) {
836 struct ieee80211_rate *rate = &sband->bitrates[i];
837
838 if (esupp_rates_len) {
839 *esupp_rates_len += 1;
840 } else if (*supp_rates_len == 8) {
841 *pos++ = WLAN_EID_EXT_SUPP_RATES;
842 esupp_rates_len = pos;
843 *pos++ = 1;
844 } else
845 *supp_rates_len += 1;
846
847 *pos++ = rate->bitrate / 5;
848 }
849
850 if (sband->ht_cap.ht_supported) {
851 __le16 tmp = cpu_to_le16(sband->ht_cap.cap);
852
853 *pos++ = WLAN_EID_HT_CAPABILITY;
854 *pos++ = sizeof(struct ieee80211_ht_cap);
855 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
856 memcpy(pos, &tmp, sizeof(u16));
857 pos += sizeof(u16);
858 /* TODO: needs a define here for << 2 */
859 *pos++ = sband->ht_cap.ampdu_factor |
860 (sband->ht_cap.ampdu_density << 2);
861 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
862 pos += sizeof(sband->ht_cap.mcs);
863 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
864 }
865
866 /*
867 * If adding more here, adjust code in main.c
868 * that calculates local->scan_ies_len.
869 */
870
871 if (ie) {
872 memcpy(pos, ie, ie_len);
873 pos += ie_len;
874 }
875
876 return pos - buffer;
877}
878
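The supported-rates encoding produced by ieee80211_build_preq_ies() above is easiest to see with concrete numbers: rates are written in 500 kbps units (the bitrate field is kept in 100 kbps units, hence the division by 5), the first eight go into the Supported Rates element and the remainder spills into Extended Supported Rates. A hedged standalone sketch:

    #include <stdio.h>

    #define WLAN_EID_SUPP_RATES      1
    #define WLAN_EID_EXT_SUPP_RATES 50

    static int build_rate_ies(unsigned char *buf, const int *bitrate_100kbps, int n)
    {
        unsigned char *pos = buf, *supp_len, *ext_len = NULL;

        *pos++ = WLAN_EID_SUPP_RATES;
        supp_len = pos;
        *pos++ = 0;

        for (int i = 0; i < n; i++) {
            if (ext_len) {
                (*ext_len)++;
            } else if (*supp_len == 8) {          /* first IE full: open the extended one */
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                ext_len = pos;
                *pos++ = 1;
            } else {
                (*supp_len)++;
            }
            *pos++ = bitrate_100kbps[i] / 5;      /* e.g. 60 (6 Mb/s) -> 12 (500 kbps units) */
        }
        return pos - buf;
    }

    int main(void)
    {
        /* 802.11g rate set: 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54 Mb/s */
        const int rates[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };
        unsigned char buf[32];
        int len = build_rate_ies(buf, rates, 12);

        for (int i = 0; i < len; i++)
            printf("%02x ", buf[i]);
        printf("\n");   /* 01 08 <8 rates>  32 04 <4 rates> */
        return 0;
    }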
834void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 879void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
835 u8 *ssid, size_t ssid_len, 880 const u8 *ssid, size_t ssid_len,
836 u8 *ie, size_t ie_len) 881 const u8 *ie, size_t ie_len)
837{ 882{
838 struct ieee80211_local *local = sdata->local; 883 struct ieee80211_local *local = sdata->local;
839 struct ieee80211_supported_band *sband;
840 struct sk_buff *skb; 884 struct sk_buff *skb;
841 struct ieee80211_mgmt *mgmt; 885 struct ieee80211_mgmt *mgmt;
842 u8 *pos, *supp_rates, *esupp_rates = NULL; 886 u8 *pos;
843 int i;
844 887
845 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 888 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
846 ie_len); 889 ie_len);
@@ -867,31 +910,9 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
867 *pos++ = WLAN_EID_SSID; 910 *pos++ = WLAN_EID_SSID;
868 *pos++ = ssid_len; 911 *pos++ = ssid_len;
869 memcpy(pos, ssid, ssid_len); 912 memcpy(pos, ssid, ssid_len);
913 pos += ssid_len;
870 914
871 supp_rates = skb_put(skb, 2); 915 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len));
872 supp_rates[0] = WLAN_EID_SUPP_RATES;
873 supp_rates[1] = 0;
874 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
875
876 for (i = 0; i < sband->n_bitrates; i++) {
877 struct ieee80211_rate *rate = &sband->bitrates[i];
878 if (esupp_rates) {
879 pos = skb_put(skb, 1);
880 esupp_rates[1]++;
881 } else if (supp_rates[1] == 8) {
882 esupp_rates = skb_put(skb, 3);
883 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
884 esupp_rates[1] = 1;
885 pos = &esupp_rates[2];
886 } else {
887 pos = skb_put(skb, 1);
888 supp_rates[1]++;
889 }
890 *pos = rate->bitrate / 5;
891 }
892
893 if (ie)
894 memcpy(skb_put(skb, ie_len), ie, ie_len);
895 916
896 ieee80211_tx_skb(sdata, skb, 0); 917 ieee80211_tx_skb(sdata, skb, 0);
897} 918}
@@ -931,3 +952,151 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
931 } 952 }
932 return supp_rates; 953 return supp_rates;
933} 954}
955
956int ieee80211_reconfig(struct ieee80211_local *local)
957{
958 struct ieee80211_hw *hw = &local->hw;
959 struct ieee80211_sub_if_data *sdata;
960 struct ieee80211_if_init_conf conf;
961 struct sta_info *sta;
962 unsigned long flags;
963 int res;
964 bool from_suspend = local->suspended;
965
966 /*
967 * We're going to start the hardware, at that point
968 * we are no longer suspended and can RX frames.
969 */
970 local->suspended = false;
971
972 /* restart hardware */
973 if (local->open_count) {
974 res = drv_start(local);
975
976 ieee80211_led_radio(local, hw->conf.radio_enabled);
977 }
978
979 /* add interfaces */
980 list_for_each_entry(sdata, &local->interfaces, list) {
981 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
982 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
983 netif_running(sdata->dev)) {
984 conf.vif = &sdata->vif;
985 conf.type = sdata->vif.type;
986 conf.mac_addr = sdata->dev->dev_addr;
987 res = drv_add_interface(local, &conf);
988 }
989 }
990
991 /* add STAs back */
992 if (local->ops->sta_notify) {
993 spin_lock_irqsave(&local->sta_lock, flags);
994 list_for_each_entry(sta, &local->sta_list, list) {
995 sdata = sta->sdata;
996 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
997 sdata = container_of(sdata->bss,
998 struct ieee80211_sub_if_data,
999 u.ap);
1000
1001 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD,
1002 &sta->sta);
1003 }
1004 spin_unlock_irqrestore(&local->sta_lock, flags);
1005 }
1006
1007 /* Clear Suspend state so that ADDBA requests can be processed */
1008
1009 rcu_read_lock();
1010
1011 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1012 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1013 clear_sta_flags(sta, WLAN_STA_SUSPEND);
1014 }
1015 }
1016
1017 rcu_read_unlock();
1018
1019 /* setup RTS threshold */
1020 drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
1021
1022 /* reconfigure hardware */
1023 ieee80211_hw_config(local, ~0);
1024
1025 netif_addr_lock_bh(local->mdev);
1026 ieee80211_configure_filter(local);
1027 netif_addr_unlock_bh(local->mdev);
1028
1029 /* Finally also reconfigure all the BSS information */
1030 list_for_each_entry(sdata, &local->interfaces, list) {
1031 u32 changed = ~0;
1032 if (!netif_running(sdata->dev))
1033 continue;
1034 switch (sdata->vif.type) {
1035 case NL80211_IFTYPE_STATION:
1036 /* disable beacon change bits */
1037 changed &= ~(BSS_CHANGED_BEACON |
1038 BSS_CHANGED_BEACON_ENABLED);
1039 /* fall through */
1040 case NL80211_IFTYPE_ADHOC:
1041 case NL80211_IFTYPE_AP:
1042 case NL80211_IFTYPE_MESH_POINT:
1043 ieee80211_bss_info_change_notify(sdata, changed);
1044 break;
1045 case NL80211_IFTYPE_WDS:
1046 break;
1047 case NL80211_IFTYPE_AP_VLAN:
1048 case NL80211_IFTYPE_MONITOR:
1049 /* ignore virtual */
1050 break;
1051 case NL80211_IFTYPE_UNSPECIFIED:
1052 case __NL80211_IFTYPE_AFTER_LAST:
1053 WARN_ON(1);
1054 break;
1055 }
1056 }
1057
1058 /* add back keys */
1059 list_for_each_entry(sdata, &local->interfaces, list)
1060 if (netif_running(sdata->dev))
1061 ieee80211_enable_keys(sdata);
1062
1063 ieee80211_wake_queues_by_reason(hw,
1064 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1065
1066 /*
1067 * If this is for hw restart, things are still running.
1068 * We may want to change that later, however.
1069 */
1070 if (!from_suspend)
1071 return 0;
1072
1073#ifdef CONFIG_PM
1074 local->suspended = false;
1075
1076 list_for_each_entry(sdata, &local->interfaces, list) {
1077 switch(sdata->vif.type) {
1078 case NL80211_IFTYPE_STATION:
1079 ieee80211_sta_restart(sdata);
1080 break;
1081 case NL80211_IFTYPE_ADHOC:
1082 ieee80211_ibss_restart(sdata);
1083 break;
1084 case NL80211_IFTYPE_MESH_POINT:
1085 ieee80211_mesh_restart(sdata);
1086 break;
1087 default:
1088 break;
1089 }
1090 }
1091
1092 add_timer(&local->sta_cleanup);
1093
1094 spin_lock_irqsave(&local->sta_lock, flags);
1095 list_for_each_entry(sta, &local->sta_list, list)
1096 mesh_plink_restart(sta);
1097 spin_unlock_irqrestore(&local->sta_lock, flags);
1098#else
1099 WARN_ON(1);
1100#endif
1101 return 0;
1102}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 959aa8379ccf..a01154e127f0 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,100 +27,6 @@
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
29 29
30static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
31 int idx, int alg, int remove,
32 int set_tx_key, const u8 *_key,
33 size_t key_len)
34{
35 struct ieee80211_local *local = sdata->local;
36 struct sta_info *sta;
37 struct ieee80211_key *key;
38 int err;
39
40 if (alg == ALG_AES_CMAC) {
41 if (idx < NUM_DEFAULT_KEYS ||
42 idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
43 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d "
44 "(BIP)\n", sdata->dev->name, idx);
45 return -EINVAL;
46 }
47 } else if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
48 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
49 sdata->dev->name, idx);
50 return -EINVAL;
51 }
52
53 if (remove) {
54 rcu_read_lock();
55
56 err = 0;
57
58 if (is_broadcast_ether_addr(sta_addr)) {
59 key = sdata->keys[idx];
60 } else {
61 sta = sta_info_get(local, sta_addr);
62 if (!sta) {
63 err = -ENOENT;
64 goto out_unlock;
65 }
66 key = sta->key;
67 }
68
69 ieee80211_key_free(key);
70 } else {
71 key = ieee80211_key_alloc(alg, idx, key_len, _key);
72 if (!key)
73 return -ENOMEM;
74
75 sta = NULL;
76 err = 0;
77
78 rcu_read_lock();
79
80 if (!is_broadcast_ether_addr(sta_addr)) {
81 set_tx_key = 0;
82 /*
83 * According to the standard, the key index of a
84 * pairwise key must be zero. However, some AP are
85 * broken when it comes to WEP key indices, so we
86 * work around this.
87 */
88 if (idx != 0 && alg != ALG_WEP) {
89 ieee80211_key_free(key);
90 err = -EINVAL;
91 goto out_unlock;
92 }
93
94 sta = sta_info_get(local, sta_addr);
95 if (!sta) {
96 ieee80211_key_free(key);
97 err = -ENOENT;
98 goto out_unlock;
99 }
100 }
101
102 if (alg == ALG_WEP &&
103 key_len != LEN_WEP40 && key_len != LEN_WEP104) {
104 ieee80211_key_free(key);
105 err = -EINVAL;
106 goto out_unlock;
107 }
108
109 ieee80211_key_link(key, sdata, sta);
110
111 if (set_tx_key || (!sta && !sdata->default_key && key))
112 ieee80211_set_default_key(sdata, idx);
113 if (alg == ALG_AES_CMAC &&
114 (set_tx_key || (!sta && !sdata->default_mgmt_key && key)))
115 ieee80211_set_default_mgmt_key(sdata, idx);
116 }
117
118 out_unlock:
119 rcu_read_unlock();
120
121 return err;
122}
123
124static int ieee80211_ioctl_siwgenie(struct net_device *dev, 30static int ieee80211_ioctl_siwgenie(struct net_device *dev,
125 struct iw_request_info *info, 31 struct iw_request_info *info,
126 struct iw_point *data, char *extra) 32 struct iw_point *data, char *extra)
@@ -131,11 +37,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
131 37
132 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 38 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
133 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); 39 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
134 if (ret) 40 if (ret && ret != -EALREADY)
135 return ret; 41 return ret;
136 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 42 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
137 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 43 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
138 ieee80211_sta_req_auth(sdata); 44 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
45 if (ret != -EALREADY)
46 ieee80211_sta_req_auth(sdata);
139 return 0; 47 return 0;
140 } 48 }
141 49
@@ -149,17 +57,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
149 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 57 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
150 58
151 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 59 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
152 sdata->u.ibss.flags &= ~IEEE80211_IBSS_AUTO_CHANNEL_SEL; 60 return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra);
153 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 61 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
154 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; 62 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
155 63
156 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ 64 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
157 if (freq->e == 0) { 65 if (freq->e == 0) {
158 if (freq->m < 0) { 66 if (freq->m < 0) {
159 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 67 if (sdata->vif.type == NL80211_IFTYPE_STATION)
160 sdata->u.ibss.flags |=
161 IEEE80211_IBSS_AUTO_CHANNEL_SEL;
162 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
163 sdata->u.mgd.flags |= 68 sdata->u.mgd.flags |=
164 IEEE80211_STA_AUTO_CHANNEL_SEL; 69 IEEE80211_STA_AUTO_CHANNEL_SEL;
165 return 0; 70 return 0;
@@ -183,8 +88,12 @@ static int ieee80211_ioctl_giwfreq(struct net_device *dev,
183 struct iw_freq *freq, char *extra) 88 struct iw_freq *freq, char *extra)
184{ 89{
185 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 90 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
91 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
186 92
187 freq->m = local->hw.conf.channel->center_freq; 93 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
94 return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
95
96 freq->m = local->oper_channel->center_freq;
188 freq->e = 6; 97 freq->e = 6;
189 98
190 return 0; 99 return 0;
@@ -195,15 +104,17 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
195 struct iw_request_info *info, 104 struct iw_request_info *info,
196 struct iw_point *data, char *ssid) 105 struct iw_point *data, char *ssid)
197{ 106{
198 struct ieee80211_sub_if_data *sdata; 107 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
199 size_t len = data->length; 108 size_t len = data->length;
200 int ret; 109 int ret;
201 110
111 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
112 return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
113
202 /* iwconfig uses nul termination in SSID.. */ 114 /* iwconfig uses nul termination in SSID.. */
203 if (len > 0 && ssid[len - 1] == '\0') 115 if (len > 0 && ssid[len - 1] == '\0')
204 len--; 116 len--;
205 117
206 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
207 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 118 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
208 if (data->flags) 119 if (data->flags)
209 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 120 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
@@ -215,10 +126,10 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
215 return ret; 126 return ret;
216 127
217 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 128 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
129 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
218 ieee80211_sta_req_auth(sdata); 130 ieee80211_sta_req_auth(sdata);
219 return 0; 131 return 0;
220 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 132 }
221 return ieee80211_ibss_set_ssid(sdata, ssid, len);
222 133
223 return -EOPNOTSUPP; 134 return -EOPNOTSUPP;
224} 135}
@@ -229,9 +140,13 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
229 struct iw_point *data, char *ssid) 140 struct iw_point *data, char *ssid)
230{ 141{
231 size_t len; 142 size_t len;
232
233 struct ieee80211_sub_if_data *sdata; 143 struct ieee80211_sub_if_data *sdata;
144
234 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 145 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
146
147 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
148 return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
149
235 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 150 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
236 int res = ieee80211_sta_get_ssid(sdata, ssid, &len); 151 int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
237 if (res == 0) { 152 if (res == 0) {
@@ -240,14 +155,6 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
240 } else 155 } else
241 data->flags = 0; 156 data->flags = 0;
242 return res; 157 return res;
243 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
244 int res = ieee80211_ibss_get_ssid(sdata, ssid, &len);
245 if (res == 0) {
246 data->length = len;
247 data->flags = 1;
248 } else
249 data->flags = 0;
250 return res;
251 } 158 }
252 159
253 return -EOPNOTSUPP; 160 return -EOPNOTSUPP;
@@ -258,9 +165,11 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
258 struct iw_request_info *info, 165 struct iw_request_info *info,
259 struct sockaddr *ap_addr, char *extra) 166 struct sockaddr *ap_addr, char *extra)
260{ 167{
261 struct ieee80211_sub_if_data *sdata; 168 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
169
170 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
171 return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
262 172
263 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
264 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 173 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
265 int ret; 174 int ret;
266 175
@@ -275,18 +184,9 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
275 if (ret) 184 if (ret)
276 return ret; 185 return ret;
277 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 186 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
187 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
278 ieee80211_sta_req_auth(sdata); 188 ieee80211_sta_req_auth(sdata);
279 return 0; 189 return 0;
280 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
281 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
282 sdata->u.ibss.flags |= IEEE80211_IBSS_AUTO_BSSID_SEL |
283 IEEE80211_IBSS_AUTO_CHANNEL_SEL;
284 else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data))
285 sdata->u.ibss.flags |= IEEE80211_IBSS_AUTO_BSSID_SEL;
286 else
287 sdata->u.ibss.flags &= ~IEEE80211_IBSS_AUTO_BSSID_SEL;
288
289 return ieee80211_ibss_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
290 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { 190 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
291 /* 191 /*
292 * If it is necessary to update the WDS peer address 192 * If it is necessary to update the WDS peer address
@@ -312,9 +212,11 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
312 struct iw_request_info *info, 212 struct iw_request_info *info,
313 struct sockaddr *ap_addr, char *extra) 213 struct sockaddr *ap_addr, char *extra)
314{ 214{
315 struct ieee80211_sub_if_data *sdata; 215 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
216
217 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
218 return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
316 219
317 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
318 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 220 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
319 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) { 221 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) {
320 ap_addr->sa_family = ARPHRD_ETHER; 222 ap_addr->sa_family = ARPHRD_ETHER;
@@ -322,13 +224,6 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
322 } else 224 } else
323 memset(&ap_addr->sa_data, 0, ETH_ALEN); 225 memset(&ap_addr->sa_data, 0, ETH_ALEN);
324 return 0; 226 return 0;
325 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
326 if (sdata->u.ibss.state == IEEE80211_IBSS_MLME_JOINED) {
327 ap_addr->sa_family = ARPHRD_ETHER;
328 memcpy(&ap_addr->sa_data, sdata->u.ibss.bssid, ETH_ALEN);
329 } else
330 memset(&ap_addr->sa_data, 0, ETH_ALEN);
331 return 0;
332 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { 227 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
333 ap_addr->sa_family = ARPHRD_ETHER; 228 ap_addr->sa_family = ARPHRD_ETHER;
334 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); 229 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
@@ -487,258 +382,6 @@ static int ieee80211_ioctl_giwtxpower(struct net_device *dev,
487 return 0; 382 return 0;
488} 383}
489 384
490static int ieee80211_ioctl_siwrts(struct net_device *dev,
491 struct iw_request_info *info,
492 struct iw_param *rts, char *extra)
493{
494 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
495
496 if (rts->disabled)
497 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
498 else if (!rts->fixed)
499 /* if the rts value is not fixed, then take default */
500 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
501 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD)
502 return -EINVAL;
503 else
504 local->rts_threshold = rts->value;
505
506 /* If the wlan card performs RTS/CTS in hardware/firmware,
507 * configure it here */
508
509 if (local->ops->set_rts_threshold)
510 local->ops->set_rts_threshold(local_to_hw(local),
511 local->rts_threshold);
512
513 return 0;
514}
515
516static int ieee80211_ioctl_giwrts(struct net_device *dev,
517 struct iw_request_info *info,
518 struct iw_param *rts, char *extra)
519{
520 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
521
522 rts->value = local->rts_threshold;
523 rts->disabled = (rts->value >= IEEE80211_MAX_RTS_THRESHOLD);
524 rts->fixed = 1;
525
526 return 0;
527}
528
529
530static int ieee80211_ioctl_siwfrag(struct net_device *dev,
531 struct iw_request_info *info,
532 struct iw_param *frag, char *extra)
533{
534 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
535
536 if (frag->disabled)
537 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
538 else if (!frag->fixed)
539 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
540 else if (frag->value < 256 ||
541 frag->value > IEEE80211_MAX_FRAG_THRESHOLD)
542 return -EINVAL;
543 else {
544 /* Fragment length must be even, so strip LSB. */
545 local->fragmentation_threshold = frag->value & ~0x1;
546 }
547
548 return 0;
549}
550
551static int ieee80211_ioctl_giwfrag(struct net_device *dev,
552 struct iw_request_info *info,
553 struct iw_param *frag, char *extra)
554{
555 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
556
557 frag->value = local->fragmentation_threshold;
558 frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
559 frag->fixed = 1;
560
561 return 0;
562}
563
564
565static int ieee80211_ioctl_siwretry(struct net_device *dev,
566 struct iw_request_info *info,
567 struct iw_param *retry, char *extra)
568{
569 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
570
571 if (retry->disabled ||
572 (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
573 return -EINVAL;
574
575 if (retry->flags & IW_RETRY_MAX) {
576 local->hw.conf.long_frame_max_tx_count = retry->value;
577 } else if (retry->flags & IW_RETRY_MIN) {
578 local->hw.conf.short_frame_max_tx_count = retry->value;
579 } else {
580 local->hw.conf.long_frame_max_tx_count = retry->value;
581 local->hw.conf.short_frame_max_tx_count = retry->value;
582 }
583
584 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
585
586 return 0;
587}
588
589
590static int ieee80211_ioctl_giwretry(struct net_device *dev,
591 struct iw_request_info *info,
592 struct iw_param *retry, char *extra)
593{
594 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
595
596 retry->disabled = 0;
597 if (retry->flags == 0 || retry->flags & IW_RETRY_MIN) {
598 /* first return min value, iwconfig will ask max value
599 * later if needed */
600 retry->flags |= IW_RETRY_LIMIT;
601 retry->value = local->hw.conf.short_frame_max_tx_count;
602 if (local->hw.conf.long_frame_max_tx_count !=
603 local->hw.conf.short_frame_max_tx_count)
604 retry->flags |= IW_RETRY_MIN;
605 return 0;
606 }
607 if (retry->flags & IW_RETRY_MAX) {
608 retry->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
609 retry->value = local->hw.conf.long_frame_max_tx_count;
610 }
611
612 return 0;
613}
614
615static int ieee80211_ioctl_siwmlme(struct net_device *dev,
616 struct iw_request_info *info,
617 struct iw_point *data, char *extra)
618{
619 struct ieee80211_sub_if_data *sdata;
620 struct iw_mlme *mlme = (struct iw_mlme *) extra;
621
622 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
623 if (!(sdata->vif.type == NL80211_IFTYPE_STATION))
624 return -EINVAL;
625
626 switch (mlme->cmd) {
627 case IW_MLME_DEAUTH:
628 /* TODO: mlme->addr.sa_data */
629 return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
630 case IW_MLME_DISASSOC:
631 /* TODO: mlme->addr.sa_data */
632 return ieee80211_sta_disassociate(sdata, mlme->reason_code);
633 default:
634 return -EOPNOTSUPP;
635 }
636}
637
638
639static int ieee80211_ioctl_siwencode(struct net_device *dev,
640 struct iw_request_info *info,
641 struct iw_point *erq, char *keybuf)
642{
643 struct ieee80211_sub_if_data *sdata;
644 int idx, i, alg = ALG_WEP;
645 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
646 int remove = 0, ret;
647
648 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
649
650 idx = erq->flags & IW_ENCODE_INDEX;
651 if (idx == 0) {
652 if (sdata->default_key)
653 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
654 if (sdata->default_key == sdata->keys[i]) {
655 idx = i;
656 break;
657 }
658 }
659 } else if (idx < 1 || idx > 4)
660 return -EINVAL;
661 else
662 idx--;
663
664 if (erq->flags & IW_ENCODE_DISABLED)
665 remove = 1;
666 else if (erq->length == 0) {
667 /* No key data - just set the default TX key index */
668 ieee80211_set_default_key(sdata, idx);
669 return 0;
670 }
671
672 ret = ieee80211_set_encryption(
673 sdata, bcaddr,
674 idx, alg, remove,
675 !sdata->default_key,
676 keybuf, erq->length);
677
678 if (!ret) {
679 if (remove)
680 sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED;
681 else
682 sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED;
683 }
684
685 return ret;
686}
687
688
689static int ieee80211_ioctl_giwencode(struct net_device *dev,
690 struct iw_request_info *info,
691 struct iw_point *erq, char *key)
692{
693 struct ieee80211_sub_if_data *sdata;
694 int idx, i;
695
696 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
697
698 idx = erq->flags & IW_ENCODE_INDEX;
699 if (idx < 1 || idx > 4) {
700 idx = -1;
701 if (!sdata->default_key)
702 idx = 0;
703 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
704 if (sdata->default_key == sdata->keys[i]) {
705 idx = i;
706 break;
707 }
708 }
709 if (idx < 0)
710 return -EINVAL;
711 } else
712 idx--;
713
714 erq->flags = idx + 1;
715
716 if (!sdata->keys[idx]) {
717 erq->length = 0;
718 erq->flags |= IW_ENCODE_DISABLED;
719 return 0;
720 }
721
722 memcpy(key, sdata->keys[idx]->conf.key,
723 min_t(int, erq->length, sdata->keys[idx]->conf.keylen));
724 erq->length = sdata->keys[idx]->conf.keylen;
725 erq->flags |= IW_ENCODE_ENABLED;
726
727 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
728 switch (sdata->u.mgd.auth_alg) {
729 case WLAN_AUTH_OPEN:
730 case WLAN_AUTH_LEAP:
731 erq->flags |= IW_ENCODE_OPEN;
732 break;
733 case WLAN_AUTH_SHARED_KEY:
734 erq->flags |= IW_ENCODE_RESTRICTED;
735 break;
736 }
737 }
738
739 return 0;
740}
741
742static int ieee80211_ioctl_siwpower(struct net_device *dev, 385static int ieee80211_ioctl_siwpower(struct net_device *dev,
743 struct iw_request_info *info, 386 struct iw_request_info *info,
744 struct iw_param *wrq, 387 struct iw_param *wrq,
@@ -747,7 +390,7 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
747 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 390 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
748 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 391 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
749 struct ieee80211_conf *conf = &local->hw.conf; 392 struct ieee80211_conf *conf = &local->hw.conf;
750 int ret = 0, timeout = 0; 393 int timeout = 0;
751 bool ps; 394 bool ps;
752 395
753 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 396 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -779,42 +422,18 @@ static int ieee80211_ioctl_siwpower(struct net_device *dev,
779 timeout = wrq->value / 1000; 422 timeout = wrq->value / 1000;
780 423
781 set: 424 set:
782 if (ps == local->powersave && timeout == conf->dynamic_ps_timeout) 425 if (ps == sdata->u.mgd.powersave && timeout == conf->dynamic_ps_timeout)
783 return ret; 426 return 0;
784 427
785 local->powersave = ps; 428 sdata->u.mgd.powersave = ps;
786 conf->dynamic_ps_timeout = timeout; 429 conf->dynamic_ps_timeout = timeout;
787 430
788 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 431 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
789 ret = ieee80211_hw_config(local, 432 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
790 IEEE80211_CONF_CHANGE_DYNPS_TIMEOUT);
791 433
792 if (!(sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED)) 434 ieee80211_recalc_ps(local, -1);
793 return ret;
794
795 if (conf->dynamic_ps_timeout > 0 &&
796 !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
797 mod_timer(&local->dynamic_ps_timer, jiffies +
798 msecs_to_jiffies(conf->dynamic_ps_timeout));
799 } else {
800 if (local->powersave) {
801 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
802 ieee80211_send_nullfunc(local, sdata, 1);
803 conf->flags |= IEEE80211_CONF_PS;
804 ret = ieee80211_hw_config(local,
805 IEEE80211_CONF_CHANGE_PS);
806 } else {
807 conf->flags &= ~IEEE80211_CONF_PS;
808 ret = ieee80211_hw_config(local,
809 IEEE80211_CONF_CHANGE_PS);
810 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
811 ieee80211_send_nullfunc(local, sdata, 0);
812 del_timer_sync(&local->dynamic_ps_timer);
813 cancel_work_sync(&local->dynamic_ps_enable_work);
814 }
815 }
816 435
817 return ret; 436 return 0;
818} 437}
819 438
820static int ieee80211_ioctl_giwpower(struct net_device *dev, 439static int ieee80211_ioctl_giwpower(struct net_device *dev,
@@ -822,9 +441,9 @@ static int ieee80211_ioctl_giwpower(struct net_device *dev,
822 union iwreq_data *wrqu, 441 union iwreq_data *wrqu,
823 char *extra) 442 char *extra)
824{ 443{
825 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 444 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
826 445
827 wrqu->power.disabled = !local->powersave; 446 wrqu->power.disabled = !sdata->u.mgd.powersave;
828 447
829 return 0; 448 return 0;
830} 449}
@@ -997,82 +616,6 @@ static int ieee80211_ioctl_giwauth(struct net_device *dev,
997} 616}
998 617
999 618
1000static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1001 struct iw_request_info *info,
1002 struct iw_point *erq, char *extra)
1003{
1004 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1005 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
1006 int uninitialized_var(alg), idx, i, remove = 0;
1007
1008 switch (ext->alg) {
1009 case IW_ENCODE_ALG_NONE:
1010 remove = 1;
1011 break;
1012 case IW_ENCODE_ALG_WEP:
1013 alg = ALG_WEP;
1014 break;
1015 case IW_ENCODE_ALG_TKIP:
1016 alg = ALG_TKIP;
1017 break;
1018 case IW_ENCODE_ALG_CCMP:
1019 alg = ALG_CCMP;
1020 break;
1021 case IW_ENCODE_ALG_AES_CMAC:
1022 alg = ALG_AES_CMAC;
1023 break;
1024 default:
1025 return -EOPNOTSUPP;
1026 }
1027
1028 if (erq->flags & IW_ENCODE_DISABLED)
1029 remove = 1;
1030
1031 idx = erq->flags & IW_ENCODE_INDEX;
1032 if (alg == ALG_AES_CMAC) {
1033 if (idx < NUM_DEFAULT_KEYS + 1 ||
1034 idx > NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) {
1035 idx = -1;
1036 if (!sdata->default_mgmt_key)
1037 idx = 0;
1038 else for (i = NUM_DEFAULT_KEYS;
1039 i < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1040 i++) {
1041 if (sdata->default_mgmt_key == sdata->keys[i])
1042 {
1043 idx = i;
1044 break;
1045 }
1046 }
1047 if (idx < 0)
1048 return -EINVAL;
1049 } else
1050 idx--;
1051 } else {
1052 if (idx < 1 || idx > 4) {
1053 idx = -1;
1054 if (!sdata->default_key)
1055 idx = 0;
1056 else for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1057 if (sdata->default_key == sdata->keys[i]) {
1058 idx = i;
1059 break;
1060 }
1061 }
1062 if (idx < 0)
1063 return -EINVAL;
1064 } else
1065 idx--;
1066 }
1067
1068 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1069 remove,
1070 ext->ext_flags &
1071 IW_ENCODE_EXT_SET_TX_KEY,
1072 ext->key, ext->key_len);
1073}
1074
1075
1076/* Structures to export the Wireless Handlers */ 619/* Structures to export the Wireless Handlers */
1077 620
1078static const iw_handler ieee80211_handler[] = 621static const iw_handler ieee80211_handler[] =
@@ -1099,7 +642,7 @@ static const iw_handler ieee80211_handler[] =
1099 (iw_handler) NULL, /* SIOCGIWTHRSPY */ 642 (iw_handler) NULL, /* SIOCGIWTHRSPY */
1100 (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */ 643 (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */
1101 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */ 644 (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */
1102 (iw_handler) ieee80211_ioctl_siwmlme, /* SIOCSIWMLME */ 645 (iw_handler) cfg80211_wext_siwmlme, /* SIOCSIWMLME */
1103 (iw_handler) NULL, /* SIOCGIWAPLIST */ 646 (iw_handler) NULL, /* SIOCGIWAPLIST */
1104 (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */ 647 (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */
1105 (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */ 648 (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */
@@ -1111,16 +654,16 @@ static const iw_handler ieee80211_handler[] =
1111 (iw_handler) NULL, /* -- hole -- */ 654 (iw_handler) NULL, /* -- hole -- */
1112 (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */ 655 (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */
1113 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */ 656 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */
1114 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */ 657 (iw_handler) cfg80211_wext_siwrts, /* SIOCSIWRTS */
1115 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */ 658 (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */
1116 (iw_handler) ieee80211_ioctl_siwfrag, /* SIOCSIWFRAG */ 659 (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */
1117 (iw_handler) ieee80211_ioctl_giwfrag, /* SIOCGIWFRAG */ 660 (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */
1118 (iw_handler) ieee80211_ioctl_siwtxpower, /* SIOCSIWTXPOW */ 661 (iw_handler) ieee80211_ioctl_siwtxpower, /* SIOCSIWTXPOW */
1119 (iw_handler) ieee80211_ioctl_giwtxpower, /* SIOCGIWTXPOW */ 662 (iw_handler) ieee80211_ioctl_giwtxpower, /* SIOCGIWTXPOW */
1120 (iw_handler) ieee80211_ioctl_siwretry, /* SIOCSIWRETRY */ 663 (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */
1121 (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ 664 (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */
1122 (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ 665 (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */
1123 (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */ 666 (iw_handler) cfg80211_wext_giwencode, /* SIOCGIWENCODE */
1124 (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */ 667 (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */
1125 (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */ 668 (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */
1126 (iw_handler) NULL, /* -- hole -- */ 669 (iw_handler) NULL, /* -- hole -- */
@@ -1129,7 +672,7 @@ static const iw_handler ieee80211_handler[] =
1129 (iw_handler) NULL, /* SIOCGIWGENIE */ 672 (iw_handler) NULL, /* SIOCGIWGENIE */
1130 (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */ 673 (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */
1131 (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */ 674 (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */
1132 (iw_handler) ieee80211_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */ 675 (iw_handler) cfg80211_wext_siwencodeext, /* SIOCSIWENCODEEXT */
1133 (iw_handler) NULL, /* SIOCGIWENCODEEXT */ 676 (iw_handler) NULL, /* SIOCGIWENCODEEXT */
1134 (iw_handler) NULL, /* SIOCSIWPMKSA */ 677 (iw_handler) NULL, /* SIOCSIWPMKSA */
1135 (iw_handler) NULL, /* -- hole -- */ 678 (iw_handler) NULL, /* -- hole -- */
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 0b8ad1f4ecdd..694343b9102b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -23,34 +23,6 @@
23 */ 23 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 25
26static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
27
28/* Given a data frame determine the 802.1p/1d tag to use. */
29static unsigned int classify_1d(struct sk_buff *skb)
30{
31 unsigned int dscp;
32
33 /* skb->priority values from 256->263 are magic values to
34 * directly indicate a specific 802.1d priority. This is used
35 * to allow 802.1d priority to be passed directly in from VLAN
36 * tags, etc.
37 */
38 if (skb->priority >= 256 && skb->priority <= 263)
39 return skb->priority - 256;
40
41 switch (skb->protocol) {
42 case htons(ETH_P_IP):
43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break;
45
46 default:
47 return 0;
48 }
49
50 return dscp >> 5;
51}
52
53
54static int wme_downgrade_ac(struct sk_buff *skb) 26static int wme_downgrade_ac(struct sk_buff *skb)
55{ 27{
56 switch (skb->priority) { 28 switch (skb->priority) {
@@ -94,7 +66,7 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
94 66
95 /* use the data classifier to determine what 802.1d tag the 67 /* use the data classifier to determine what 802.1d tag the
96 * data frame has */ 68 * data frame has */
97 skb->priority = classify_1d(skb); 69 skb->priority = cfg80211_classify8021d(skb);
98 70
99 /* in case we are a client verify acm is not set for this ac */ 71 /* in case we are a client verify acm is not set for this ac */
100 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 72 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
@@ -133,7 +105,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
133 u8 *p = ieee80211_get_qos_ctl(hdr); 105 u8 *p = ieee80211_get_qos_ctl(hdr);
134 u8 ack_policy = 0; 106 u8 ack_policy = 0;
135 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 107 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
136 if (local->wifi_wme_noack_test) 108 if (unlikely(local->wifi_wme_noack_test))
137 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 109 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
138 QOS_CONTROL_ACK_POLICY_SHIFT; 110 QOS_CONTROL_ACK_POLICY_SHIFT;
139 /* qos header is 2 bytes, second reserved */ 111 /* qos header is 2 bytes, second reserved */
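The classify_1d() helper removed above is superseded by cfg80211_classify8021d(), but the idea is unchanged: derive an 802.1d tag from the top three DSCP bits of the IPv4 TOS byte (or from the magic skb->priority range 256-263) and map that tag to a WMM access category via ieee802_1d_to_ac[]. A rough sketch of that mapping, assuming IPv4 only and mac80211's queue numbering in which 0 is the highest-priority voice queue:

#include <stdint.h>

/* 802.1d tag -> WMM AC queue, mirroring ieee802_1d_to_ac[] above
 * (0 = VO, 1 = VI, 2 = BE, 3 = BK in mac80211's queue numbering). */
static const int tag_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* Derive the 802.1d tag from an IPv4 TOS byte: the DSCP occupies the
 * top six bits, and its three most significant bits select the tag. */
static int classify_tos(uint8_t tos)
{
        return (tos & 0xfc) >> 5;
}

For example, TOS 0xb8 (DSCP EF) yields tag 5 and therefore queue 1, the video queue, under this coarse mapping.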
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 4f8bfea278f2..dcfae8884b86 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -122,7 +122,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
122 return RX_DROP_UNUSABLE; 122 return RX_DROP_UNUSABLE;
123 123
124 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, 124 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
125 (void *) skb->data); 125 (void *) skb->data, NULL);
126 return RX_DROP_UNUSABLE; 126 return RX_DROP_UNUSABLE;
127 } 127 }
128 128
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e01061f49cdc..7c1333c67ff3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3345,22 +3345,8 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3345 3345
3346static int __init ip_vs_genl_register(void) 3346static int __init ip_vs_genl_register(void)
3347{ 3347{
3348 int ret, i; 3348 return genl_register_family_with_ops(&ip_vs_genl_family,
3349 3349 ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops));
3350 ret = genl_register_family(&ip_vs_genl_family);
3351 if (ret)
3352 return ret;
3353
3354 for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
3355 ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
3356 if (ret)
3357 goto err_out;
3358 }
3359 return 0;
3360
3361err_out:
3362 genl_unregister_family(&ip_vs_genl_family);
3363 return ret;
3364} 3350}
3365 3351
3366static void ip_vs_genl_unregister(void) 3352static void ip_vs_genl_unregister(void)
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index bf1ab1a6790d..e639298bc9c8 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -785,18 +785,6 @@ static struct genl_ops netlbl_cipsov4_ops[] = {
785 */ 785 */
786int __init netlbl_cipsov4_genl_init(void) 786int __init netlbl_cipsov4_genl_init(void)
787{ 787{
788 int ret_val, i; 788 return genl_register_family_with_ops(&netlbl_cipsov4_gnl_family,
789 789 netlbl_cipsov4_ops, ARRAY_SIZE(netlbl_cipsov4_ops));
790 ret_val = genl_register_family(&netlbl_cipsov4_gnl_family);
791 if (ret_val != 0)
792 return ret_val;
793
794 for (i = 0; i < ARRAY_SIZE(netlbl_cipsov4_ops); i++) {
795 ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
796 &netlbl_cipsov4_ops[i]);
797 if (ret_val != 0)
798 return ret_val;
799 }
800
801 return 0;
802} 790}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 1821c5d50fb8..8203623e65ad 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -779,18 +779,6 @@ static struct genl_ops netlbl_mgmt_genl_ops[] = {
779 */ 779 */
780int __init netlbl_mgmt_genl_init(void) 780int __init netlbl_mgmt_genl_init(void)
781{ 781{
782 int ret_val, i; 782 return genl_register_family_with_ops(&netlbl_mgmt_gnl_family,
783 783 netlbl_mgmt_genl_ops, ARRAY_SIZE(netlbl_mgmt_genl_ops));
784 ret_val = genl_register_family(&netlbl_mgmt_gnl_family);
785 if (ret_val != 0)
786 return ret_val;
787
788 for (i = 0; i < ARRAY_SIZE(netlbl_mgmt_genl_ops); i++) {
789 ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
790 &netlbl_mgmt_genl_ops[i]);
791 if (ret_val != 0)
792 return ret_val;
793 }
794
795 return 0;
796} 784}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index f3c5c68c6848..fb357f010189 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1478,20 +1478,8 @@ static struct genl_ops netlbl_unlabel_genl_ops[] = {
1478 */ 1478 */
1479int __init netlbl_unlabel_genl_init(void) 1479int __init netlbl_unlabel_genl_init(void)
1480{ 1480{
1481 int ret_val, i; 1481 return genl_register_family_with_ops(&netlbl_unlabel_gnl_family,
1482 1482 netlbl_unlabel_genl_ops, ARRAY_SIZE(netlbl_unlabel_genl_ops));
1483 ret_val = genl_register_family(&netlbl_unlabel_gnl_family);
1484 if (ret_val != 0)
1485 return ret_val;
1486
1487 for (i = 0; i < ARRAY_SIZE(netlbl_unlabel_genl_ops); i++) {
1488 ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
1489 &netlbl_unlabel_genl_ops[i]);
1490 if (ret_val != 0)
1491 return ret_val;
1492 }
1493
1494 return 0;
1495} 1483}
1496 1484
1497/* 1485/*
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1d3dd30099df..eed4c6a8afc0 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -384,6 +384,52 @@ errout:
384} 384}
385 385
386/** 386/**
387 * genl_register_family_with_ops - register a generic netlink family
388 * @family: generic netlink family
389 * @ops: operations to be registered
390 * @n_ops: number of elements to register
391 *
392 * Registers the specified family and operations from the specified table.
393 * Only one family may be registered with the same family name or identifier.
394 *
395 * The family id may equal GENL_ID_GENERATE, causing a unique id to
396 * be automatically generated and assigned.
397 *
398 * Either a doit or dumpit callback must be specified for every registered
399 * operation or the function will fail. Only one operation structure per
400 * command identifier may be registered.
401 *
402 * See include/net/genetlink.h for more documentation on the operations
403 * structure.
404 *
405 * This is equivalent to calling genl_register_family() followed by
406 * genl_register_ops() for every operation entry in the table taking
407 * care to unregister the family on error path.
408 *
409 * Return 0 on success or a negative error code.
410 */
411int genl_register_family_with_ops(struct genl_family *family,
412 struct genl_ops *ops, size_t n_ops)
413{
414 int err, i;
415
416 err = genl_register_family(family);
417 if (err)
418 return err;
419
420 for (i = 0; i < n_ops; ++i, ++ops) {
421 err = genl_register_ops(family, ops);
422 if (err)
423 goto err_out;
424 }
425 return 0;
426err_out:
427 genl_unregister_family(family);
428 return err;
429}
430EXPORT_SYMBOL(genl_register_family_with_ops);
431
432/**
387 * genl_unregister_family - unregister generic netlink family 433 * genl_unregister_family - unregister generic netlink family
388 * @family: generic netlink family 434 * @family: generic netlink family
389 * 435 *
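For reference, a caller of the new helper ends up looking like the hypothetical sketch below; the family name, command id and foo_doit() handler are made up for illustration, while the real conversions are the ip_vs and netlabel hunks above:

#include <net/genetlink.h>

/* Hypothetical handler, not shown here. */
static int foo_doit(struct sk_buff *skb, struct genl_info *info);

static struct genl_family foo_genl_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "FOO_EXAMPLE",
        .version = 1,
        .maxattr = 0,
};

static struct genl_ops foo_genl_ops[] = {
        {
                .cmd  = 1,              /* hypothetical command id */
                .doit = foo_doit,
        },
};

static int __init foo_genl_init(void)
{
        /* Registers the family and all ops; the helper unregisters the
         * family again if any individual op fails to register. */
        return genl_register_family_with_ops(&foo_genl_family,
                                             foo_genl_ops,
                                             ARRAY_SIZE(foo_genl_ops));
}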
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f546e81acc45..6da9f38ef5c1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -39,6 +39,7 @@
39 * will simply extend the hardware address 39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll 40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq. 41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
42 * 43 *
43 * This program is free software; you can redistribute it and/or 44 * This program is free software; you can redistribute it and/or
44 * modify it under the terms of the GNU General Public License 45 * modify it under the terms of the GNU General Public License
@@ -157,7 +158,25 @@ struct packet_mreq_max
157}; 158};
158 159
159#ifdef CONFIG_PACKET_MMAP 160#ifdef CONFIG_PACKET_MMAP
160static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing); 161static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
162 int closing, int tx_ring);
163
164struct packet_ring_buffer {
165 char * *pg_vec;
166 unsigned int head;
167 unsigned int frames_per_block;
168 unsigned int frame_size;
169 unsigned int frame_max;
170
171 unsigned int pg_vec_order;
172 unsigned int pg_vec_pages;
173 unsigned int pg_vec_len;
174
175 atomic_t pending;
176};
177
178struct packet_sock;
179static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
161#endif 180#endif
162 181
163static void packet_flush_mclist(struct sock *sk); 182static void packet_flush_mclist(struct sock *sk);
@@ -167,11 +186,8 @@ struct packet_sock {
167 struct sock sk; 186 struct sock sk;
168 struct tpacket_stats stats; 187 struct tpacket_stats stats;
169#ifdef CONFIG_PACKET_MMAP 188#ifdef CONFIG_PACKET_MMAP
170 char * *pg_vec; 189 struct packet_ring_buffer rx_ring;
171 unsigned int head; 190 struct packet_ring_buffer tx_ring;
172 unsigned int frames_per_block;
173 unsigned int frame_size;
174 unsigned int frame_max;
175 int copy_thresh; 191 int copy_thresh;
176#endif 192#endif
177 struct packet_type prot_hook; 193 struct packet_type prot_hook;
@@ -185,12 +201,10 @@ struct packet_sock {
185 struct packet_mclist *mclist; 201 struct packet_mclist *mclist;
186#ifdef CONFIG_PACKET_MMAP 202#ifdef CONFIG_PACKET_MMAP
187 atomic_t mapped; 203 atomic_t mapped;
188 unsigned int pg_vec_order;
189 unsigned int pg_vec_pages;
190 unsigned int pg_vec_len;
191 enum tpacket_versions tp_version; 204 enum tpacket_versions tp_version;
192 unsigned int tp_hdrlen; 205 unsigned int tp_hdrlen;
193 unsigned int tp_reserve; 206 unsigned int tp_reserve;
207 unsigned int tp_loss:1;
194#endif 208#endif
195}; 209};
196 210
@@ -206,36 +220,33 @@ struct packet_skb_cb {
206 220
207#ifdef CONFIG_PACKET_MMAP 221#ifdef CONFIG_PACKET_MMAP
208 222
209static void *packet_lookup_frame(struct packet_sock *po, unsigned int position, 223static void __packet_set_status(struct packet_sock *po, void *frame, int status)
210 int status)
211{ 224{
212 unsigned int pg_vec_pos, frame_offset;
213 union { 225 union {
214 struct tpacket_hdr *h1; 226 struct tpacket_hdr *h1;
215 struct tpacket2_hdr *h2; 227 struct tpacket2_hdr *h2;
216 void *raw; 228 void *raw;
217 } h; 229 } h;
218 230
219 pg_vec_pos = position / po->frames_per_block; 231 h.raw = frame;
220 frame_offset = position % po->frames_per_block;
221
222 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
223 switch (po->tp_version) { 232 switch (po->tp_version) {
224 case TPACKET_V1: 233 case TPACKET_V1:
225 if (status != (h.h1->tp_status ? TP_STATUS_USER : 234 h.h1->tp_status = status;
226 TP_STATUS_KERNEL)) 235 flush_dcache_page(virt_to_page(&h.h1->tp_status));
227 return NULL;
228 break; 236 break;
229 case TPACKET_V2: 237 case TPACKET_V2:
230 if (status != (h.h2->tp_status ? TP_STATUS_USER : 238 h.h2->tp_status = status;
231 TP_STATUS_KERNEL)) 239 flush_dcache_page(virt_to_page(&h.h2->tp_status));
232 return NULL;
233 break; 240 break;
241 default:
242 printk(KERN_ERR "TPACKET version not supported\n");
243 BUG();
234 } 244 }
235 return h.raw; 245
246 smp_wmb();
236} 247}
237 248
238static void __packet_set_status(struct packet_sock *po, void *frame, int status) 249static int __packet_get_status(struct packet_sock *po, void *frame)
239{ 250{
240 union { 251 union {
241 struct tpacket_hdr *h1; 252 struct tpacket_hdr *h1;
@@ -243,16 +254,66 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
243 void *raw; 254 void *raw;
244 } h; 255 } h;
245 256
257 smp_rmb();
258
246 h.raw = frame; 259 h.raw = frame;
247 switch (po->tp_version) { 260 switch (po->tp_version) {
248 case TPACKET_V1: 261 case TPACKET_V1:
249 h.h1->tp_status = status; 262 flush_dcache_page(virt_to_page(&h.h1->tp_status));
250 break; 263 return h.h1->tp_status;
251 case TPACKET_V2: 264 case TPACKET_V2:
252 h.h2->tp_status = status; 265 flush_dcache_page(virt_to_page(&h.h2->tp_status));
253 break; 266 return h.h2->tp_status;
267 default:
268 printk(KERN_ERR "TPACKET version not supported\n");
269 BUG();
270 return 0;
254 } 271 }
255} 272}
273
274static void *packet_lookup_frame(struct packet_sock *po,
275 struct packet_ring_buffer *rb,
276 unsigned int position,
277 int status)
278{
279 unsigned int pg_vec_pos, frame_offset;
280 union {
281 struct tpacket_hdr *h1;
282 struct tpacket2_hdr *h2;
283 void *raw;
284 } h;
285
286 pg_vec_pos = position / rb->frames_per_block;
287 frame_offset = position % rb->frames_per_block;
288
289 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
290
291 if (status != __packet_get_status(po, h.raw))
292 return NULL;
293
294 return h.raw;
295}
296
297static inline void *packet_current_frame(struct packet_sock *po,
298 struct packet_ring_buffer *rb,
299 int status)
300{
301 return packet_lookup_frame(po, rb, rb->head, status);
302}
303
304static inline void *packet_previous_frame(struct packet_sock *po,
305 struct packet_ring_buffer *rb,
306 int status)
307{
308 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
309 return packet_lookup_frame(po, rb, previous, status);
310}
311
312static inline void packet_increment_head(struct packet_ring_buffer *buff)
313{
314 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
315}
316
256#endif 317#endif
257 318
258static inline struct packet_sock *pkt_sk(struct sock *sk) 319static inline struct packet_sock *pkt_sk(struct sock *sk)
@@ -648,7 +709,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
648 macoff = netoff - maclen; 709 macoff = netoff - maclen;
649 } 710 }
650 711
651 if (macoff + snaplen > po->frame_size) { 712 if (macoff + snaplen > po->rx_ring.frame_size) {
652 if (po->copy_thresh && 713 if (po->copy_thresh &&
653 atomic_read(&sk->sk_rmem_alloc) + skb->truesize < 714 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
654 (unsigned)sk->sk_rcvbuf) { 715 (unsigned)sk->sk_rcvbuf) {
@@ -661,16 +722,16 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
661 if (copy_skb) 722 if (copy_skb)
662 skb_set_owner_r(copy_skb, sk); 723 skb_set_owner_r(copy_skb, sk);
663 } 724 }
664 snaplen = po->frame_size - macoff; 725 snaplen = po->rx_ring.frame_size - macoff;
665 if ((int)snaplen < 0) 726 if ((int)snaplen < 0)
666 snaplen = 0; 727 snaplen = 0;
667 } 728 }
668 729
669 spin_lock(&sk->sk_receive_queue.lock); 730 spin_lock(&sk->sk_receive_queue.lock);
670 h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL); 731 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
671 if (!h.raw) 732 if (!h.raw)
672 goto ring_is_full; 733 goto ring_is_full;
673 po->head = po->head != po->frame_max ? po->head+1 : 0; 734 packet_increment_head(&po->rx_ring);
674 po->stats.tp_packets++; 735 po->stats.tp_packets++;
675 if (copy_skb) { 736 if (copy_skb) {
676 status |= TP_STATUS_COPY; 737 status |= TP_STATUS_COPY;
@@ -727,7 +788,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
727 788
728 __packet_set_status(po, h.raw, status); 789 __packet_set_status(po, h.raw, status);
729 smp_mb(); 790 smp_mb();
730
731 { 791 {
732 struct page *p_start, *p_end; 792 struct page *p_start, *p_end;
733 u8 *h_end = h.raw + macoff + snaplen - 1; 793 u8 *h_end = h.raw + macoff + snaplen - 1;
@@ -760,10 +820,249 @@ ring_is_full:
760 goto drop_n_restore; 820 goto drop_n_restore;
761} 821}
762 822
763#endif 823static void tpacket_destruct_skb(struct sk_buff *skb)
824{
825 struct packet_sock *po = pkt_sk(skb->sk);
826 void * ph;
764 827
828 BUG_ON(skb == NULL);
765 829
766static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 830 if (likely(po->tx_ring.pg_vec)) {
831 ph = skb_shinfo(skb)->destructor_arg;
832 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
833 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
834 atomic_dec(&po->tx_ring.pending);
835 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
836 }
837
838 sock_wfree(skb);
839}
840
841static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
842 void * frame, struct net_device *dev, int size_max,
843 __be16 proto, unsigned char * addr)
844{
845 union {
846 struct tpacket_hdr *h1;
847 struct tpacket2_hdr *h2;
848 void *raw;
849 } ph;
850 int to_write, offset, len, tp_len, nr_frags, len_max;
851 struct socket *sock = po->sk.sk_socket;
852 struct page *page;
853 void *data;
854 int err;
855
856 ph.raw = frame;
857
858 skb->protocol = proto;
859 skb->dev = dev;
860 skb->priority = po->sk.sk_priority;
861 skb_shinfo(skb)->destructor_arg = ph.raw;
862
863 switch (po->tp_version) {
864 case TPACKET_V2:
865 tp_len = ph.h2->tp_len;
866 break;
867 default:
868 tp_len = ph.h1->tp_len;
869 break;
870 }
871 if (unlikely(tp_len > size_max)) {
872 printk(KERN_ERR "packet size is too long (%d > %d)\n",
873 tp_len, size_max);
874 return -EMSGSIZE;
875 }
876
877 skb_reserve(skb, LL_RESERVED_SPACE(dev));
878 skb_reset_network_header(skb);
879
880 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
881 to_write = tp_len;
882
883 if (sock->type == SOCK_DGRAM) {
884 err = dev_hard_header(skb, dev, ntohs(proto), addr,
885 NULL, tp_len);
886 if (unlikely(err < 0))
887 return -EINVAL;
888 } else if (dev->hard_header_len ) {
889 /* net device doesn't like empty head */
890 if (unlikely(tp_len <= dev->hard_header_len)) {
891 printk(KERN_ERR "packet size is too short "
892 "(%d < %d)\n", tp_len,
893 dev->hard_header_len);
894 return -EINVAL;
895 }
896
897 skb_push(skb, dev->hard_header_len);
898 err = skb_store_bits(skb, 0, data,
899 dev->hard_header_len);
900 if (unlikely(err))
901 return err;
902
903 data += dev->hard_header_len;
904 to_write -= dev->hard_header_len;
905 }
906
907 err = -EFAULT;
908 page = virt_to_page(data);
909 offset = offset_in_page(data);
910 len_max = PAGE_SIZE - offset;
911 len = ((to_write > len_max) ? len_max : to_write);
912
913 skb->data_len = to_write;
914 skb->len += to_write;
915 skb->truesize += to_write;
916 atomic_add(to_write, &po->sk.sk_wmem_alloc);
917
918 while (likely(to_write)) {
919 nr_frags = skb_shinfo(skb)->nr_frags;
920
921 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
922 printk(KERN_ERR "Packet exceed the number "
923 "of skb frags(%lu)\n",
924 MAX_SKB_FRAGS);
925 return -EFAULT;
926 }
927
928 flush_dcache_page(page);
929 get_page(page);
930 skb_fill_page_desc(skb,
931 nr_frags,
932 page++, offset, len);
933 to_write -= len;
934 offset = 0;
935 len_max = PAGE_SIZE;
936 len = ((to_write > len_max) ? len_max : to_write);
937 }
938
939 return tp_len;
940}
941
942static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
943{
944 struct socket *sock;
945 struct sk_buff *skb;
946 struct net_device *dev;
947 __be16 proto;
948 int ifindex, err, reserve = 0;
949 void * ph;
950 struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
951 int tp_len, size_max;
952 unsigned char *addr;
953 int len_sum = 0;
954 int status = 0;
955
956 sock = po->sk.sk_socket;
957
958 mutex_lock(&po->pg_vec_lock);
959
960 err = -EBUSY;
961 if (saddr == NULL) {
962 ifindex = po->ifindex;
963 proto = po->num;
964 addr = NULL;
965 } else {
966 err = -EINVAL;
967 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
968 goto out;
969 if (msg->msg_namelen < (saddr->sll_halen
970 + offsetof(struct sockaddr_ll,
971 sll_addr)))
972 goto out;
973 ifindex = saddr->sll_ifindex;
974 proto = saddr->sll_protocol;
975 addr = saddr->sll_addr;
976 }
977
978 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
979 err = -ENXIO;
980 if (unlikely(dev == NULL))
981 goto out;
982
983 reserve = dev->hard_header_len;
984
985 err = -ENETDOWN;
986 if (unlikely(!(dev->flags & IFF_UP)))
987 goto out_put;
988
989 size_max = po->tx_ring.frame_size
990 - sizeof(struct skb_shared_info)
991 - po->tp_hdrlen
992 - LL_ALLOCATED_SPACE(dev)
993 - sizeof(struct sockaddr_ll);
994
995 if (size_max > dev->mtu + reserve)
996 size_max = dev->mtu + reserve;
997
998 do {
999 ph = packet_current_frame(po, &po->tx_ring,
1000 TP_STATUS_SEND_REQUEST);
1001
1002 if (unlikely(ph == NULL)) {
1003 schedule();
1004 continue;
1005 }
1006
1007 status = TP_STATUS_SEND_REQUEST;
1008 skb = sock_alloc_send_skb(&po->sk,
1009 LL_ALLOCATED_SPACE(dev)
1010 + sizeof(struct sockaddr_ll),
1011 0, &err);
1012
1013 if (unlikely(skb == NULL))
1014 goto out_status;
1015
1016 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1017 addr);
1018
1019 if (unlikely(tp_len < 0)) {
1020 if (po->tp_loss) {
1021 __packet_set_status(po, ph,
1022 TP_STATUS_AVAILABLE);
1023 packet_increment_head(&po->tx_ring);
1024 kfree_skb(skb);
1025 continue;
1026 } else {
1027 status = TP_STATUS_WRONG_FORMAT;
1028 err = tp_len;
1029 goto out_status;
1030 }
1031 }
1032
1033 skb->destructor = tpacket_destruct_skb;
1034 __packet_set_status(po, ph, TP_STATUS_SENDING);
1035 atomic_inc(&po->tx_ring.pending);
1036
1037 status = TP_STATUS_SEND_REQUEST;
1038 err = dev_queue_xmit(skb);
1039 if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
1040 goto out_xmit;
1041 packet_increment_head(&po->tx_ring);
1042 len_sum += tp_len;
1043 }
1044 while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
1045 && (atomic_read(&po->tx_ring.pending))))
1046 );
1047
1048 err = len_sum;
1049 goto out_put;
1050
1051out_xmit:
1052 skb->destructor = sock_wfree;
1053 atomic_dec(&po->tx_ring.pending);
1054out_status:
1055 __packet_set_status(po, ph, status);
1056 kfree_skb(skb);
1057out_put:
1058 dev_put(dev);
1059out:
1060 mutex_unlock(&po->pg_vec_lock);
1061 return err;
1062}
1063#endif
1064
1065static int packet_snd(struct socket *sock,
767 struct msghdr *msg, size_t len) 1066 struct msghdr *msg, size_t len)
768{ 1067{
769 struct sock *sk = sock->sk; 1068 struct sock *sk = sock->sk;
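From user space, the TX ring added by tpacket_snd() is driven much like the existing RX ring: size it with setsockopt(PACKET_TX_RING), mmap() the frames, mark each filled frame TP_STATUS_SEND_REQUEST and kick transmission with a zero-length send(). A minimal sketch assuming TPACKET_V1, a hard-coded "eth0" interface and no error handling (an illustration, not a complete program):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>

int main(void)
{
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        struct tpacket_req req = {
                .tp_block_size = 4096,
                .tp_block_nr   = 4,
                .tp_frame_size = 2048,
                .tp_frame_nr   = 8,     /* (block_size / frame_size) * block_nr */
        };
        struct sockaddr_ll ll = {
                .sll_family   = AF_PACKET,
                .sll_protocol = htons(ETH_P_ALL),
                .sll_ifindex  = if_nametoindex("eth0"), /* assumed interface */
        };
        void *ring;
        struct tpacket_hdr *hdr;
        unsigned char *data;

        setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
        bind(fd, (struct sockaddr *)&ll, sizeof(ll));

        /* One mapping covers the whole TX ring. */
        ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* Fill frame 0: payload starts after the header area, as the
         * kernel's tpacket_fill_skb() expects for TPACKET_V1. */
        hdr  = ring;
        data = (unsigned char *)ring + TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
        memset(data, 0xff, 60);         /* dummy 60-byte frame */
        hdr->tp_len    = 60;
        hdr->tp_status = TP_STATUS_SEND_REQUEST;

        /* A zero-length send() flushes all TP_STATUS_SEND_REQUEST frames. */
        send(fd, NULL, 0, 0);

        munmap(ring, req.tp_block_size * req.tp_block_nr);
        close(fd);
        return 0;
}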
@@ -854,6 +1153,19 @@ out:
854 return err; 1153 return err;
855} 1154}
856 1155
1156static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1157 struct msghdr *msg, size_t len)
1158{
1159#ifdef CONFIG_PACKET_MMAP
1160 struct sock *sk = sock->sk;
1161 struct packet_sock *po = pkt_sk(sk);
1162 if (po->tx_ring.pg_vec)
1163 return tpacket_snd(po, msg);
1164 else
1165#endif
1166 return packet_snd(sock, msg, len);
1167}
1168
857/* 1169/*
858 * Close a PACKET socket. This is fairly simple. We immediately go 1170 * Close a PACKET socket. This is fairly simple. We immediately go
859 * to 'closed' state and remove our protocol entry in the device list. 1171 * to 'closed' state and remove our protocol entry in the device list.
@@ -864,6 +1176,9 @@ static int packet_release(struct socket *sock)
864 struct sock *sk = sock->sk; 1176 struct sock *sk = sock->sk;
865 struct packet_sock *po; 1177 struct packet_sock *po;
866 struct net *net; 1178 struct net *net;
1179#ifdef CONFIG_PACKET_MMAP
1180 struct tpacket_req req;
1181#endif
867 1182
868 if (!sk) 1183 if (!sk)
869 return 0; 1184 return 0;
@@ -893,11 +1208,13 @@ static int packet_release(struct socket *sock)
893 packet_flush_mclist(sk); 1208 packet_flush_mclist(sk);
894 1209
895#ifdef CONFIG_PACKET_MMAP 1210#ifdef CONFIG_PACKET_MMAP
896 if (po->pg_vec) { 1211 memset(&req, 0, sizeof(req));
897 struct tpacket_req req; 1212
898 memset(&req, 0, sizeof(req)); 1213 if (po->rx_ring.pg_vec)
899 packet_set_ring(sk, &req, 1); 1214 packet_set_ring(sk, &req, 1, 0);
900 } 1215
1216 if (po->tx_ring.pg_vec)
1217 packet_set_ring(sk, &req, 1, 1);
901#endif 1218#endif
902 1219
903 /* 1220 /*
@@ -1253,9 +1570,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1253 switch (i->type) { 1570 switch (i->type) {
1254 case PACKET_MR_MULTICAST: 1571 case PACKET_MR_MULTICAST:
1255 if (what > 0) 1572 if (what > 0)
1256 dev_mc_add(dev, i->addr, i->alen, 0); 1573 return dev_mc_add(dev, i->addr, i->alen, 0);
1257 else 1574 else
1258 dev_mc_delete(dev, i->addr, i->alen, 0); 1575 return dev_mc_delete(dev, i->addr, i->alen, 0);
1259 break; 1576 break;
1260 case PACKET_MR_PROMISC: 1577 case PACKET_MR_PROMISC:
1261 return dev_set_promiscuity(dev, what); 1578 return dev_set_promiscuity(dev, what);
@@ -1263,6 +1580,12 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1263 case PACKET_MR_ALLMULTI: 1580 case PACKET_MR_ALLMULTI:
1264 return dev_set_allmulti(dev, what); 1581 return dev_set_allmulti(dev, what);
1265 break; 1582 break;
1583 case PACKET_MR_UNICAST:
1584 if (what > 0)
1585 return dev_unicast_add(dev, i->addr);
1586 else
1587 return dev_unicast_delete(dev, i->addr);
1588 break;
1266 default:; 1589 default:;
1267 } 1590 }
1268 return 0; 1591 return 0;
@@ -1391,7 +1714,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1391 if (level != SOL_PACKET) 1714 if (level != SOL_PACKET)
1392 return -ENOPROTOOPT; 1715 return -ENOPROTOOPT;
1393 1716
1394 switch(optname) { 1717 switch (optname) {
1395 case PACKET_ADD_MEMBERSHIP: 1718 case PACKET_ADD_MEMBERSHIP:
1396 case PACKET_DROP_MEMBERSHIP: 1719 case PACKET_DROP_MEMBERSHIP:
1397 { 1720 {
@@ -1415,6 +1738,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1415 1738
1416#ifdef CONFIG_PACKET_MMAP 1739#ifdef CONFIG_PACKET_MMAP
1417 case PACKET_RX_RING: 1740 case PACKET_RX_RING:
1741 case PACKET_TX_RING:
1418 { 1742 {
1419 struct tpacket_req req; 1743 struct tpacket_req req;
1420 1744
@@ -1422,7 +1746,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1422 return -EINVAL; 1746 return -EINVAL;
1423 if (copy_from_user(&req,optval,sizeof(req))) 1747 if (copy_from_user(&req,optval,sizeof(req)))
1424 return -EFAULT; 1748 return -EFAULT;
1425 return packet_set_ring(sk, &req, 0); 1749 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
1426 } 1750 }
1427 case PACKET_COPY_THRESH: 1751 case PACKET_COPY_THRESH:
1428 { 1752 {
@@ -1442,7 +1766,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1442 1766
1443 if (optlen != sizeof(val)) 1767 if (optlen != sizeof(val))
1444 return -EINVAL; 1768 return -EINVAL;
1445 if (po->pg_vec) 1769 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1446 return -EBUSY; 1770 return -EBUSY;
1447 if (copy_from_user(&val, optval, sizeof(val))) 1771 if (copy_from_user(&val, optval, sizeof(val)))
1448 return -EFAULT; 1772 return -EFAULT;
@@ -1461,13 +1785,26 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1461 1785
1462 if (optlen != sizeof(val)) 1786 if (optlen != sizeof(val))
1463 return -EINVAL; 1787 return -EINVAL;
1464 if (po->pg_vec) 1788 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1465 return -EBUSY; 1789 return -EBUSY;
1466 if (copy_from_user(&val, optval, sizeof(val))) 1790 if (copy_from_user(&val, optval, sizeof(val)))
1467 return -EFAULT; 1791 return -EFAULT;
1468 po->tp_reserve = val; 1792 po->tp_reserve = val;
1469 return 0; 1793 return 0;
1470 } 1794 }
1795 case PACKET_LOSS:
1796 {
1797 unsigned int val;
1798
1799 if (optlen != sizeof(val))
1800 return -EINVAL;
1801 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1802 return -EBUSY;
1803 if (copy_from_user(&val, optval, sizeof(val)))
1804 return -EFAULT;
1805 po->tp_loss = !!val;
1806 return 0;
1807 }
1471#endif 1808#endif
1472 case PACKET_AUXDATA: 1809 case PACKET_AUXDATA:
1473 { 1810 {
@@ -1517,7 +1854,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1517 if (len < 0) 1854 if (len < 0)
1518 return -EINVAL; 1855 return -EINVAL;
1519 1856
1520 switch(optname) { 1857 switch (optname) {
1521 case PACKET_STATISTICS: 1858 case PACKET_STATISTICS:
1522 if (len > sizeof(struct tpacket_stats)) 1859 if (len > sizeof(struct tpacket_stats))
1523 len = sizeof(struct tpacket_stats); 1860 len = sizeof(struct tpacket_stats);
@@ -1573,6 +1910,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1573 val = po->tp_reserve; 1910 val = po->tp_reserve;
1574 data = &val; 1911 data = &val;
1575 break; 1912 break;
1913 case PACKET_LOSS:
1914 if (len > sizeof(unsigned int))
1915 len = sizeof(unsigned int);
1916 val = po->tp_loss;
1917 data = &val;
1918 break;
1576#endif 1919#endif
1577 default: 1920 default:
1578 return -ENOPROTOOPT; 1921 return -ENOPROTOOPT;
@@ -1643,7 +1986,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
1643{ 1986{
1644 struct sock *sk = sock->sk; 1987 struct sock *sk = sock->sk;
1645 1988
1646 switch(cmd) { 1989 switch (cmd) {
1647 case SIOCOUTQ: 1990 case SIOCOUTQ:
1648 { 1991 {
1649 int amount = atomic_read(&sk->sk_wmem_alloc); 1992 int amount = atomic_read(&sk->sk_wmem_alloc);
@@ -1705,13 +2048,17 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
1705 unsigned int mask = datagram_poll(file, sock, wait); 2048 unsigned int mask = datagram_poll(file, sock, wait);
1706 2049
1707 spin_lock_bh(&sk->sk_receive_queue.lock); 2050 spin_lock_bh(&sk->sk_receive_queue.lock);
1708 if (po->pg_vec) { 2051 if (po->rx_ring.pg_vec) {
1709 unsigned last = po->head ? po->head-1 : po->frame_max; 2052 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
1710
1711 if (packet_lookup_frame(po, last, TP_STATUS_USER))
1712 mask |= POLLIN | POLLRDNORM; 2053 mask |= POLLIN | POLLRDNORM;
1713 } 2054 }
1714 spin_unlock_bh(&sk->sk_receive_queue.lock); 2055 spin_unlock_bh(&sk->sk_receive_queue.lock);
2056 spin_lock_bh(&sk->sk_write_queue.lock);
2057 if (po->tx_ring.pg_vec) {
2058 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2059 mask |= POLLOUT | POLLWRNORM;
2060 }
2061 spin_unlock_bh(&sk->sk_write_queue.lock);
1715 return mask; 2062 return mask;
1716} 2063}
1717 2064
@@ -1788,21 +2135,33 @@ out_free_pgvec:
1788 goto out; 2135 goto out;
1789} 2136}
1790 2137
1791static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing) 2138static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2139 int closing, int tx_ring)
1792{ 2140{
1793 char **pg_vec = NULL; 2141 char **pg_vec = NULL;
1794 struct packet_sock *po = pkt_sk(sk); 2142 struct packet_sock *po = pkt_sk(sk);
1795 int was_running, order = 0; 2143 int was_running, order = 0;
2144 struct packet_ring_buffer *rb;
2145 struct sk_buff_head *rb_queue;
1796 __be16 num; 2146 __be16 num;
1797 int err = 0; 2147 int err;
1798 2148
1799 if (req->tp_block_nr) { 2149 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
1800 int i; 2150 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
1801 2151
1802 /* Sanity tests and some calculations */ 2152 err = -EBUSY;
2153 if (!closing) {
2154 if (atomic_read(&po->mapped))
2155 goto out;
2156 if (atomic_read(&rb->pending))
2157 goto out;
2158 }
1803 2159
1804 if (unlikely(po->pg_vec)) 2160 if (req->tp_block_nr) {
1805 return -EBUSY; 2161 /* Sanity tests and some calculations */
2162 err = -EBUSY;
2163 if (unlikely(rb->pg_vec))
2164 goto out;
1806 2165
1807 switch (po->tp_version) { 2166 switch (po->tp_version) {
1808 case TPACKET_V1: 2167 case TPACKET_V1:
@@ -1813,42 +2172,35 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1813 break; 2172 break;
1814 } 2173 }
1815 2174
2175 err = -EINVAL;
1816 if (unlikely((int)req->tp_block_size <= 0)) 2176 if (unlikely((int)req->tp_block_size <= 0))
1817 return -EINVAL; 2177 goto out;
1818 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 2178 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1819 return -EINVAL; 2179 goto out;
1820 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 2180 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
1821 po->tp_reserve)) 2181 po->tp_reserve))
1822 return -EINVAL; 2182 goto out;
1823 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 2183 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1824 return -EINVAL; 2184 goto out;
1825 2185
1826 po->frames_per_block = req->tp_block_size/req->tp_frame_size; 2186 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
1827 if (unlikely(po->frames_per_block <= 0)) 2187 if (unlikely(rb->frames_per_block <= 0))
1828 return -EINVAL; 2188 goto out;
1829 if (unlikely((po->frames_per_block * req->tp_block_nr) != 2189 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
1830 req->tp_frame_nr)) 2190 req->tp_frame_nr))
1831 return -EINVAL; 2191 goto out;
1832 2192
1833 err = -ENOMEM; 2193 err = -ENOMEM;
1834 order = get_order(req->tp_block_size); 2194 order = get_order(req->tp_block_size);
1835 pg_vec = alloc_pg_vec(req, order); 2195 pg_vec = alloc_pg_vec(req, order);
1836 if (unlikely(!pg_vec)) 2196 if (unlikely(!pg_vec))
1837 goto out; 2197 goto out;
1838 2198 }
1839 for (i = 0; i < req->tp_block_nr; i++) { 2199 /* Done */
1840 void *ptr = pg_vec[i]; 2200 else {
1841 int k; 2201 err = -EINVAL;
1842
1843 for (k = 0; k < po->frames_per_block; k++) {
1844 __packet_set_status(po, ptr, TP_STATUS_KERNEL);
1845 ptr += req->tp_frame_size;
1846 }
1847 }
1848 /* Done */
1849 } else {
1850 if (unlikely(req->tp_frame_nr)) 2202 if (unlikely(req->tp_frame_nr))
1851 return -EINVAL; 2203 goto out;
1852 } 2204 }
1853 2205
1854 lock_sock(sk); 2206 lock_sock(sk);
@@ -1872,23 +2224,24 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1872 if (closing || atomic_read(&po->mapped) == 0) { 2224 if (closing || atomic_read(&po->mapped) == 0) {
1873 err = 0; 2225 err = 0;
1874#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) 2226#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1875 2227 spin_lock_bh(&rb_queue->lock);
1876 spin_lock_bh(&sk->sk_receive_queue.lock); 2228 pg_vec = XC(rb->pg_vec, pg_vec);
1877 pg_vec = XC(po->pg_vec, pg_vec); 2229 rb->frame_max = (req->tp_frame_nr - 1);
1878 po->frame_max = (req->tp_frame_nr - 1); 2230 rb->head = 0;
1879 po->head = 0; 2231 rb->frame_size = req->tp_frame_size;
1880 po->frame_size = req->tp_frame_size; 2232 spin_unlock_bh(&rb_queue->lock);
1881 spin_unlock_bh(&sk->sk_receive_queue.lock); 2233
1882 2234 order = XC(rb->pg_vec_order, order);
1883 order = XC(po->pg_vec_order, order); 2235 req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
1884 req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr); 2236
1885 2237 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
1886 po->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 2238 po->prot_hook.func = (po->rx_ring.pg_vec) ?
1887 po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv; 2239 tpacket_rcv : packet_rcv;
1888 skb_queue_purge(&sk->sk_receive_queue); 2240 skb_queue_purge(rb_queue);
1889#undef XC 2241#undef XC
1890 if (atomic_read(&po->mapped)) 2242 if (atomic_read(&po->mapped))
1891 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); 2243 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
2244 atomic_read(&po->mapped));
1892 } 2245 }
1893 mutex_unlock(&po->pg_vec_lock); 2246 mutex_unlock(&po->pg_vec_lock);
1894 2247
@@ -1909,11 +2262,13 @@ out:
1909 return err; 2262 return err;
1910} 2263}
1911 2264
1912static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 2265static int packet_mmap(struct file *file, struct socket *sock,
2266 struct vm_area_struct *vma)
1913{ 2267{
1914 struct sock *sk = sock->sk; 2268 struct sock *sk = sock->sk;
1915 struct packet_sock *po = pkt_sk(sk); 2269 struct packet_sock *po = pkt_sk(sk);
1916 unsigned long size; 2270 unsigned long size, expected_size;
2271 struct packet_ring_buffer *rb;
1917 unsigned long start; 2272 unsigned long start;
1918 int err = -EINVAL; 2273 int err = -EINVAL;
1919 int i; 2274 int i;
@@ -1921,26 +2276,43 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1921 if (vma->vm_pgoff) 2276 if (vma->vm_pgoff)
1922 return -EINVAL; 2277 return -EINVAL;
1923 2278
1924 size = vma->vm_end - vma->vm_start;
1925
1926 mutex_lock(&po->pg_vec_lock); 2279 mutex_lock(&po->pg_vec_lock);
1927 if (po->pg_vec == NULL) 2280
2281 expected_size = 0;
2282 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2283 if (rb->pg_vec) {
2284 expected_size += rb->pg_vec_len
2285 * rb->pg_vec_pages
2286 * PAGE_SIZE;
2287 }
2288 }
2289
2290 if (expected_size == 0)
1928 goto out; 2291 goto out;
1929 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) 2292
2293 size = vma->vm_end - vma->vm_start;
2294 if (size != expected_size)
1930 goto out; 2295 goto out;
1931 2296
1932 start = vma->vm_start; 2297 start = vma->vm_start;
1933 for (i = 0; i < po->pg_vec_len; i++) { 2298 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
1934 struct page *page = virt_to_page(po->pg_vec[i]); 2299 if (rb->pg_vec == NULL)
1935 int pg_num; 2300 continue;
1936 2301
1937 for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) { 2302 for (i = 0; i < rb->pg_vec_len; i++) {
1938 err = vm_insert_page(vma, start, page); 2303 struct page *page = virt_to_page(rb->pg_vec[i]);
1939 if (unlikely(err)) 2304 int pg_num;
1940 goto out; 2305
1941 start += PAGE_SIZE; 2306 for (pg_num = 0; pg_num < rb->pg_vec_pages;
2307 pg_num++,page++) {
2308 err = vm_insert_page(vma, start, page);
2309 if (unlikely(err))
2310 goto out;
2311 start += PAGE_SIZE;
2312 }
1942 } 2313 }
1943 } 2314 }
2315
1944 atomic_inc(&po->mapped); 2316 atomic_inc(&po->mapped);
1945 vma->vm_ops = &packet_mmap_ops; 2317 vma->vm_ops = &packet_mmap_ops;
1946 err = 0; 2318 err = 0;
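
The af_packet.c changes above add a transmit ring (PACKET_TX_RING) next to the existing receive ring, route sendmsg() through tpacket_snd() whenever a TX ring is configured, and make packet_mmap() expect a single mapping covering both rings. A minimal userspace sketch of how such a TX ring could be set up follows; it assumes an already created and bound AF_PACKET socket plus the matching PACKET_TX_RING definition from linux/if_packet.h, and the ring geometry shown is illustrative only.

#include <string.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

/* Sketch: request a TX ring and map RX + TX rings with one mmap() call,
 * matching the combined size check in the reworked packet_mmap().
 * 'rx' describes an RX ring configured earlier (all zeros if none). */
static void *map_packet_rings(int fd, const struct tpacket_req *rx,
                              struct tpacket_req *tx)
{
        size_t len;

        memset(tx, 0, sizeof(*tx));
        tx->tp_block_size = 4096;               /* must be page aligned   */
        tx->tp_frame_size = 2048;               /* TPACKET_ALIGNMENT multiple */
        tx->tp_block_nr   = 64;
        tx->tp_frame_nr   = (tx->tp_block_size / tx->tp_frame_size) *
                            tx->tp_block_nr;

        if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, tx, sizeof(*tx)) < 0)
                return MAP_FAILED;

        len = (size_t)rx->tp_block_size * rx->tp_block_nr +
              (size_t)tx->tp_block_size * tx->tp_block_nr;
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

Frames are then written into TX slots whose tp_status reads TP_STATUS_AVAILABLE, marked TP_STATUS_SEND_REQUEST, and flushed with a plain send(fd, NULL, 0, 0); tpacket_snd() above walks the ring, transmits each frame, and returns the slots to user space (or marks them TP_STATUS_WRONG_FORMAT when PACKET_LOSS is not set).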
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 4aa888584d20..851f6a3f8ddd 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -212,8 +212,9 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
212 dev->stats.tx_bytes += len; 212 dev->stats.tx_bytes += len;
213 } 213 }
214 214
215 if (!pep_writeable(sk)) 215 netif_stop_queue(dev);
216 netif_stop_queue(dev); 216 if (pep_writeable(sk))
217 netif_wake_queue(dev);
217 return 0; 218 return 0;
218} 219}
219 220
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 20cf16fc572f..b11e7e527864 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -35,7 +35,6 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/version.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include "rds.h" 40#include "rds.h"
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 273f064930a8..d14445c48304 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -148,14 +148,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
148 if (conn) 148 if (conn)
149 goto out; 149 goto out;
150 150
151 conn = kmem_cache_alloc(rds_conn_slab, gfp); 151 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
152 if (conn == NULL) { 152 if (conn == NULL) {
153 conn = ERR_PTR(-ENOMEM); 153 conn = ERR_PTR(-ENOMEM);
154 goto out; 154 goto out;
155 } 155 }
156 156
157 memset(conn, 0, sizeof(*conn));
158
159 INIT_HLIST_NODE(&conn->c_hash_node); 157 INIT_HLIST_NODE(&conn->c_hash_node);
160 conn->c_version = RDS_PROTOCOL_3_0; 158 conn->c_version = RDS_PROTOCOL_3_0;
161 conn->c_laddr = laddr; 159 conn->c_laddr = laddr;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 4933b380985e..b9bcd32431e1 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -224,8 +224,8 @@ static int rds_ib_laddr_check(__be32 addr)
224 * IB and iWARP capable NICs. 224 * IB and iWARP capable NICs.
225 */ 225 */
226 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 226 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
227 if (!cm_id) 227 if (IS_ERR(cm_id))
228 return -EADDRNOTAVAIL; 228 return PTR_ERR(cm_id);
229 229
230 memset(&sin, 0, sizeof(sin)); 230 memset(&sin, 0, sizeof(sin));
231 sin.sin_family = AF_INET; 231 sin.sin_family = AF_INET;
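
The fix above works because rdma_create_id() reports failure through an encoded error pointer rather than NULL, so the old NULL test could never trigger. A minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention the check now follows (generic helper, hypothetical name):

#include <linux/err.h>

/* Sketch: decode the kernel's error-pointer convention.  A failing
 * constructor returns ERR_PTR(-errno); IS_ERR() detects it and
 * PTR_ERR() recovers the negative errno for the caller. */
static long handle_or_errno(const void *handle)
{
        if (IS_ERR(handle))
                return PTR_ERR(handle);  /* e.g. -ENODEV, -ENOMEM */
        return 0;
}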
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 069206cae733..455ae73047fe 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -333,7 +333,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
333void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits); 333void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
334void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted); 334void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
335int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, 335int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
336 u32 *adv_credits, int need_posted); 336 u32 *adv_credits, int need_posted, int max_posted);
337 337
338/* ib_stats.c */ 338/* ib_stats.c */
339DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); 339DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 36d931573ff4..5709bad28329 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -524,7 +524,7 @@ void rds_ib_attempt_ack(struct rds_ib_connection *ic)
524 } 524 }
525 525
526 /* Can we get a send credit? */ 526 /* Can we get a send credit? */
527 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0)) { 527 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
528 rds_ib_stats_inc(s_ib_tx_throttle); 528 rds_ib_stats_inc(s_ib_tx_throttle);
529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); 529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
530 return; 530 return;
diff --git a/net/rds/ib_ring.c b/net/rds/ib_ring.c
index 99a6ccae964c..ff97e8eda858 100644
--- a/net/rds/ib_ring.c
+++ b/net/rds/ib_ring.c
@@ -137,7 +137,7 @@ int rds_ib_ring_empty(struct rds_ib_work_ring *ring)
137 137
138int rds_ib_ring_low(struct rds_ib_work_ring *ring) 138int rds_ib_ring_low(struct rds_ib_work_ring *ring)
139{ 139{
140 return __rds_ib_ring_used(ring) <= (ring->w_nr >> 2); 140 return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
141} 141}
142 142
143/* 143/*
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index cb6c52cb1c4c..23bf830db2d5 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -311,7 +311,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
311 * and using atomic_cmpxchg when updating the two counters. 311 * and using atomic_cmpxchg when updating the two counters.
312 */ 312 */
313int rds_ib_send_grab_credits(struct rds_ib_connection *ic, 313int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
314 u32 wanted, u32 *adv_credits, int need_posted) 314 u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
315{ 315{
316 unsigned int avail, posted, got = 0, advertise; 316 unsigned int avail, posted, got = 0, advertise;
317 long oldval, newval; 317 long oldval, newval;
@@ -351,7 +351,7 @@ try_again:
351 * available. 351 * available.
352 */ 352 */
353 if (posted && (got || need_posted)) { 353 if (posted && (got || need_posted)) {
354 advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT); 354 advertise = min_t(unsigned int, posted, max_posted);
355 newval -= IB_SET_POST_CREDITS(advertise); 355 newval -= IB_SET_POST_CREDITS(advertise);
356 } 356 }
357 357
@@ -498,7 +498,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
498 498
499 credit_alloc = work_alloc; 499 credit_alloc = work_alloc;
500 if (ic->i_flowctl) { 500 if (ic->i_flowctl) {
501 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0); 501 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
502 adv_credits += posted; 502 adv_credits += posted;
503 if (credit_alloc < work_alloc) { 503 if (credit_alloc < work_alloc) {
504 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); 504 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
@@ -506,7 +506,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
506 flow_controlled++; 506 flow_controlled++;
507 } 507 }
508 if (work_alloc == 0) { 508 if (work_alloc == 0) {
509 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); 509 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
510 rds_ib_stats_inc(s_ib_tx_throttle); 510 rds_ib_stats_inc(s_ib_tx_throttle);
511 ret = -ENOMEM; 511 ret = -ENOMEM;
512 goto out; 512 goto out;
@@ -571,7 +571,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
571 /* 571 /*
572 * Update adv_credits since we reset the ACK_REQUIRED bit. 572 * Update adv_credits since we reset the ACK_REQUIRED bit.
573 */ 573 */
574 rds_ib_send_grab_credits(ic, 0, &posted, 1); 574 rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
575 adv_credits += posted; 575 adv_credits += posted;
576 BUG_ON(adv_credits > 255); 576 BUG_ON(adv_credits > 255);
577 } else if (ic->i_rm != rm) 577 } else if (ic->i_rm != rm)
diff --git a/net/rds/info.c b/net/rds/info.c
index 1d885535214d..62aeef37aefe 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -188,10 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
188 ret = -ENOMEM; 188 ret = -ENOMEM;
189 goto out; 189 goto out;
190 } 190 }
191 down_read(&current->mm->mmap_sem); 191 ret = get_user_pages_fast(start, nr_pages, 1, pages);
192 ret = get_user_pages(current, current->mm, start, nr_pages, 1, 0,
193 pages, NULL);
194 up_read(&current->mm->mmap_sem);
195 if (ret != nr_pages) { 192 if (ret != nr_pages) {
196 if (ret > 0) 193 if (ret > 0)
197 nr_pages = ret; 194 nr_pages = ret;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index b732efb5b634..d16e1cbc8e83 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -233,8 +233,8 @@ static int rds_iw_laddr_check(__be32 addr)
233 * IB and iWARP capable NICs. 233 * IB and iWARP capable NICs.
234 */ 234 */
235 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 235 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
236 if (!cm_id) 236 if (IS_ERR(cm_id))
237 return -EADDRNOTAVAIL; 237 return PTR_ERR(cm_id);
238 238
239 memset(&sin, 0, sizeof(sin)); 239 memset(&sin, 0, sizeof(sin));
240 sin.sin_family = AF_INET; 240 sin.sin_family = AF_INET;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index b4fb27252895..0715dde323e7 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -361,7 +361,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits); 361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted); 362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted, 363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
364 u32 *adv_credits, int need_posted); 364 u32 *adv_credits, int need_posted, int max_posted);
365 365
366/* ib_stats.c */ 366/* ib_stats.c */
367DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats); 367DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index fde470fa50d5..8683f5f66c4b 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -524,7 +524,7 @@ void rds_iw_attempt_ack(struct rds_iw_connection *ic)
524 } 524 }
525 525
526 /* Can we get a send credit? */ 526 /* Can we get a send credit? */
527 if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0)) { 527 if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
528 rds_iw_stats_inc(s_iw_tx_throttle); 528 rds_iw_stats_inc(s_iw_tx_throttle);
529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); 529 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
530 return; 530 return;
diff --git a/net/rds/iw_ring.c b/net/rds/iw_ring.c
index d422d4b5deef..da8e3b63f663 100644
--- a/net/rds/iw_ring.c
+++ b/net/rds/iw_ring.c
@@ -137,7 +137,7 @@ int rds_iw_ring_empty(struct rds_iw_work_ring *ring)
137 137
138int rds_iw_ring_low(struct rds_iw_work_ring *ring) 138int rds_iw_ring_low(struct rds_iw_work_ring *ring)
139{ 139{
140 return __rds_iw_ring_used(ring) <= (ring->w_nr >> 2); 140 return __rds_iw_ring_used(ring) <= (ring->w_nr >> 1);
141} 141}
142 142
143 143
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 22dd38ffd608..44a6a0551f28 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -347,7 +347,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
347 * and using atomic_cmpxchg when updating the two counters. 347 * and using atomic_cmpxchg when updating the two counters.
348 */ 348 */
349int rds_iw_send_grab_credits(struct rds_iw_connection *ic, 349int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
350 u32 wanted, u32 *adv_credits, int need_posted) 350 u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
351{ 351{
352 unsigned int avail, posted, got = 0, advertise; 352 unsigned int avail, posted, got = 0, advertise;
353 long oldval, newval; 353 long oldval, newval;
@@ -387,7 +387,7 @@ try_again:
387 * available. 387 * available.
388 */ 388 */
389 if (posted && (got || need_posted)) { 389 if (posted && (got || need_posted)) {
390 advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT); 390 advertise = min_t(unsigned int, posted, max_posted);
391 newval -= IB_SET_POST_CREDITS(advertise); 391 newval -= IB_SET_POST_CREDITS(advertise);
392 } 392 }
393 393
@@ -541,7 +541,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
541 541
542 credit_alloc = work_alloc; 542 credit_alloc = work_alloc;
543 if (ic->i_flowctl) { 543 if (ic->i_flowctl) {
544 credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0); 544 credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
545 adv_credits += posted; 545 adv_credits += posted;
546 if (credit_alloc < work_alloc) { 546 if (credit_alloc < work_alloc) {
547 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); 547 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
@@ -549,7 +549,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
549 flow_controlled++; 549 flow_controlled++;
550 } 550 }
551 if (work_alloc == 0) { 551 if (work_alloc == 0) {
552 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc); 552 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
553 rds_iw_stats_inc(s_iw_tx_throttle); 553 rds_iw_stats_inc(s_iw_tx_throttle);
554 ret = -ENOMEM; 554 ret = -ENOMEM;
555 goto out; 555 goto out;
@@ -614,7 +614,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
614 /* 614 /*
615 * Update adv_credits since we reset the ACK_REQUIRED bit. 615 * Update adv_credits since we reset the ACK_REQUIRED bit.
616 */ 616 */
617 rds_iw_send_grab_credits(ic, 0, &posted, 1); 617 rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
618 adv_credits += posted; 618 adv_credits += posted;
619 BUG_ON(adv_credits > 255); 619 BUG_ON(adv_credits > 255);
620 } else if (ic->i_rm != rm) 620 } else if (ic->i_rm != rm)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index eaeeb91e1119..8dc83d2caa58 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -150,12 +150,9 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
150{ 150{
151 int ret; 151 int ret;
152 152
153 down_read(&current->mm->mmap_sem); 153 ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
154 ret = get_user_pages(current, current->mm, user_addr,
155 nr_pages, write, 0, pages, NULL);
156 up_read(&current->mm->mmap_sem);
157 154
158 if (0 <= ret && (unsigned) ret < nr_pages) { 155 if (ret >= 0 && ret < nr_pages) {
159 while (ret--) 156 while (ret--)
160 put_page(pages[ret]); 157 put_page(pages[ret]);
161 ret = -EFAULT; 158 ret = -EFAULT;
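
The rds/info.c and rds/rdma.c hunks replace the mmap_sem-protected get_user_pages() sequence with get_user_pages_fast(), which handles the locking internally. A minimal sketch of that pinning pattern, including the short-pin cleanup the RDS code keeps (function name hypothetical):

#include <linux/mm.h>

/* Sketch: pin a user buffer for later kernel access.  On a partial
 * pin, release whatever was grabbed and report -EFAULT, mirroring
 * rds_pin_pages() after the conversion above. */
static int pin_user_range(unsigned long user_addr, int nr_pages,
                          struct page **pages, int write)
{
        int ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

        if (ret >= 0 && ret < nr_pages) {
                while (ret--)
                        put_page(pages[ret]);
                ret = -EFAULT;
        }
        return ret;
}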
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7b19024f9706..7d0f901c93d5 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -34,7 +34,7 @@
34 34
35#include "rdma_transport.h" 35#include "rdma_transport.h"
36 36
37static struct rdma_cm_id *rds_iw_listen_id; 37static struct rdma_cm_id *rds_rdma_listen_id;
38 38
39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, 39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
40 struct rdma_cm_event *event) 40 struct rdma_cm_event *event)
@@ -161,7 +161,7 @@ static int __init rds_rdma_listen_init(void)
161 161
162 rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT); 162 rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
163 163
164 rds_iw_listen_id = cm_id; 164 rds_rdma_listen_id = cm_id;
165 cm_id = NULL; 165 cm_id = NULL;
166out: 166out:
167 if (cm_id) 167 if (cm_id)
@@ -171,10 +171,10 @@ out:
171 171
172static void rds_rdma_listen_stop(void) 172static void rds_rdma_listen_stop(void)
173{ 173{
174 if (rds_iw_listen_id) { 174 if (rds_rdma_listen_id) {
175 rdsdebug("cm %p\n", rds_iw_listen_id); 175 rdsdebug("cm %p\n", rds_rdma_listen_id);
176 rdma_destroy_id(rds_iw_listen_id); 176 rdma_destroy_id(rds_rdma_listen_id);
177 rds_iw_listen_id = NULL; 177 rds_rdma_listen_id = NULL;
178 } 178 }
179} 179}
180 180
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 71794449ca4e..dbe111236783 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -132,7 +132,7 @@ struct rds_connection {
132#define RDS_FLAG_CONG_BITMAP 0x01 132#define RDS_FLAG_CONG_BITMAP 0x01
133#define RDS_FLAG_ACK_REQUIRED 0x02 133#define RDS_FLAG_ACK_REQUIRED 0x02
134#define RDS_FLAG_RETRANSMITTED 0x04 134#define RDS_FLAG_RETRANSMITTED 0x04
135#define RDS_MAX_ADV_CREDIT 127 135#define RDS_MAX_ADV_CREDIT 255
136 136
137/* 137/*
138 * Maximum space available for extension headers. 138 * Maximum space available for extension headers.
diff --git a/net/rds/send.c b/net/rds/send.c
index 104fe033203d..a4a7f428cd76 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -854,11 +854,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
854 854
855 rm->m_daddr = daddr; 855 rm->m_daddr = daddr;
856 856
857 /* Parse any control messages the user may have included. */
858 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
859 if (ret)
860 goto out;
861
862 /* rds_conn_create has a spinlock that runs with IRQ off. 857 /* rds_conn_create has a spinlock that runs with IRQ off.
863 * Caching the conn in the socket helps a lot. */ 858 * Caching the conn in the socket helps a lot. */
864 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) 859 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
@@ -874,6 +869,11 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
874 rs->rs_conn = conn; 869 rs->rs_conn = conn;
875 } 870 }
876 871
872 /* Parse any control messages the user may have included. */
873 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
874 if (ret)
875 goto out;
876
877 if ((rm->m_rdma_cookie || rm->m_rdma_op) 877 if ((rm->m_rdma_cookie || rm->m_rdma_op)
878 && conn->c_trans->xmit_rdma == NULL) { 878 && conn->c_trans->xmit_rdma == NULL) {
879 if (printk_ratelimit()) 879 if (printk_ratelimit())
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index 84efde97c5a7..60a34f3b5f65 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -47,12 +47,6 @@ enum rfkill_global_sched_op {
47 RFKILL_GLOBAL_OP_UNBLOCK, 47 RFKILL_GLOBAL_OP_UNBLOCK,
48}; 48};
49 49
50/*
51 * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
52 * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
53 * future, when such events are added, that code will be necessary.
54 */
55
56struct rfkill_task { 50struct rfkill_task {
57 struct delayed_work dwork; 51 struct delayed_work dwork;
58 52
@@ -65,14 +59,6 @@ struct rfkill_task {
65 /* pending regular switch operations (1=pending) */ 59 /* pending regular switch operations (1=pending) */
66 unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; 60 unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
67 61
68#ifdef RFKILL_NEED_SWSET
69 /* set operation pending (1=pending) */
70 unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
71
72 /* desired state for pending set operation (1=unblock) */
73 unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
74#endif
75
76 /* should the state be complemented (1=yes) */ 62 /* should the state be complemented (1=yes) */
77 unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; 63 unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
78 64
@@ -111,24 +97,6 @@ static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
111 } 97 }
112} 98}
113 99
114#ifdef RFKILL_NEED_SWSET
115static void __rfkill_handle_normal_op(const enum rfkill_type type,
116 const bool sp, const bool s, const bool c)
117{
118 enum rfkill_state state;
119
120 if (sp)
121 state = (s) ? RFKILL_STATE_UNBLOCKED :
122 RFKILL_STATE_SOFT_BLOCKED;
123 else
124 state = rfkill_get_global_state(type);
125
126 if (c)
127 state = rfkill_state_complement(state);
128
129 rfkill_switch_all(type, state);
130}
131#else
132static void __rfkill_handle_normal_op(const enum rfkill_type type, 100static void __rfkill_handle_normal_op(const enum rfkill_type type,
133 const bool c) 101 const bool c)
134{ 102{
@@ -140,7 +108,6 @@ static void __rfkill_handle_normal_op(const enum rfkill_type type,
140 108
141 rfkill_switch_all(type, state); 109 rfkill_switch_all(type, state);
142} 110}
143#endif
144 111
145static void rfkill_task_handler(struct work_struct *work) 112static void rfkill_task_handler(struct work_struct *work)
146{ 113{
@@ -171,21 +138,11 @@ static void rfkill_task_handler(struct work_struct *work)
171 i < RFKILL_TYPE_MAX) { 138 i < RFKILL_TYPE_MAX) {
172 if (test_and_clear_bit(i, task->sw_pending)) { 139 if (test_and_clear_bit(i, task->sw_pending)) {
173 bool c; 140 bool c;
174#ifdef RFKILL_NEED_SWSET
175 bool sp, s;
176 sp = test_and_clear_bit(i,
177 task->sw_setpending);
178 s = test_bit(i, task->sw_newstate);
179#endif
180 c = test_and_clear_bit(i, 141 c = test_and_clear_bit(i,
181 task->sw_togglestate); 142 task->sw_togglestate);
182 spin_unlock_irq(&task->lock); 143 spin_unlock_irq(&task->lock);
183 144
184#ifdef RFKILL_NEED_SWSET
185 __rfkill_handle_normal_op(i, sp, s, c);
186#else
187 __rfkill_handle_normal_op(i, c); 145 __rfkill_handle_normal_op(i, c);
188#endif
189 146
190 spin_lock_irq(&task->lock); 147 spin_lock_irq(&task->lock);
191 } 148 }
@@ -238,32 +195,6 @@ static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
238 spin_unlock_irqrestore(&rfkill_task.lock, flags); 195 spin_unlock_irqrestore(&rfkill_task.lock, flags);
239} 196}
240 197
241#ifdef RFKILL_NEED_SWSET
242/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
243
244static void rfkill_schedule_set(enum rfkill_type type,
245 enum rfkill_state desired_state)
246{
247 unsigned long flags;
248
249 if (rfkill_is_epo_lock_active())
250 return;
251
252 spin_lock_irqsave(&rfkill_task.lock, flags);
253 if (!rfkill_task.global_op_pending) {
254 set_bit(type, rfkill_task.sw_pending);
255 set_bit(type, rfkill_task.sw_setpending);
256 clear_bit(type, rfkill_task.sw_togglestate);
257 if (desired_state)
258 set_bit(type, rfkill_task.sw_newstate);
259 else
260 clear_bit(type, rfkill_task.sw_newstate);
261 rfkill_schedule_ratelimited();
262 }
263 spin_unlock_irqrestore(&rfkill_task.lock, flags);
264}
265#endif
266
267static void rfkill_schedule_toggle(enum rfkill_type type) 198static void rfkill_schedule_toggle(enum rfkill_type type)
268{ 199{
269 unsigned long flags; 200 unsigned long flags;
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 3eaa39403c13..4f5a83183c95 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -75,6 +75,11 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
75 75
76 rfkill_led_trigger(rfkill, rfkill->state); 76 rfkill_led_trigger(rfkill, rfkill->state);
77} 77}
78#else
79static inline void rfkill_led_trigger(struct rfkill *rfkill,
80 enum rfkill_state state)
81{
82}
78#endif /* CONFIG_RFKILL_LEDS */ 83#endif /* CONFIG_RFKILL_LEDS */
79 84
80static void rfkill_uevent(struct rfkill *rfkill) 85static void rfkill_uevent(struct rfkill *rfkill)
@@ -96,6 +101,7 @@ static void update_rfkill_state(struct rfkill *rfkill)
96 } 101 }
97 mutex_unlock(&rfkill->mutex); 102 mutex_unlock(&rfkill->mutex);
98 } 103 }
104 rfkill_led_trigger(rfkill, rfkill->state);
99} 105}
100 106
101/** 107/**
@@ -136,8 +142,9 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
136 oldstate = rfkill->state; 142 oldstate = rfkill->state;
137 143
138 if (rfkill->get_state && !force && 144 if (rfkill->get_state && !force &&
139 !rfkill->get_state(rfkill->data, &newstate)) 145 !rfkill->get_state(rfkill->data, &newstate)) {
140 rfkill->state = newstate; 146 rfkill->state = newstate;
147 }
141 148
142 switch (state) { 149 switch (state) {
143 case RFKILL_STATE_HARD_BLOCKED: 150 case RFKILL_STATE_HARD_BLOCKED:
@@ -172,6 +179,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
172 if (force || rfkill->state != oldstate) 179 if (force || rfkill->state != oldstate)
173 rfkill_uevent(rfkill); 180 rfkill_uevent(rfkill);
174 181
182 rfkill_led_trigger(rfkill, rfkill->state);
175 return retval; 183 return retval;
176} 184}
177 185
@@ -200,10 +208,11 @@ static void __rfkill_switch_all(const enum rfkill_type type,
200 208
201 rfkill_global_states[type].current_state = state; 209 rfkill_global_states[type].current_state = state;
202 list_for_each_entry(rfkill, &rfkill_list, node) { 210 list_for_each_entry(rfkill, &rfkill_list, node) {
203 if ((!rfkill->user_claim) && (rfkill->type == type)) { 211 if (rfkill->type == type) {
204 mutex_lock(&rfkill->mutex); 212 mutex_lock(&rfkill->mutex);
205 rfkill_toggle_radio(rfkill, state, 0); 213 rfkill_toggle_radio(rfkill, state, 0);
206 mutex_unlock(&rfkill->mutex); 214 mutex_unlock(&rfkill->mutex);
215 rfkill_led_trigger(rfkill, rfkill->state);
207 } 216 }
208 } 217 }
209} 218}
@@ -256,6 +265,7 @@ void rfkill_epo(void)
256 RFKILL_STATE_SOFT_BLOCKED; 265 RFKILL_STATE_SOFT_BLOCKED;
257 } 266 }
258 mutex_unlock(&rfkill_global_mutex); 267 mutex_unlock(&rfkill_global_mutex);
268 rfkill_led_trigger(rfkill, rfkill->state);
259} 269}
260EXPORT_SYMBOL_GPL(rfkill_epo); 270EXPORT_SYMBOL_GPL(rfkill_epo);
261 271
@@ -358,6 +368,7 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
358 rfkill_uevent(rfkill); 368 rfkill_uevent(rfkill);
359 369
360 mutex_unlock(&rfkill->mutex); 370 mutex_unlock(&rfkill->mutex);
371 rfkill_led_trigger(rfkill, rfkill->state);
361 372
362 return 0; 373 return 0;
363} 374}
@@ -447,53 +458,14 @@ static ssize_t rfkill_claim_show(struct device *dev,
447 struct device_attribute *attr, 458 struct device_attribute *attr,
448 char *buf) 459 char *buf)
449{ 460{
450 struct rfkill *rfkill = to_rfkill(dev); 461 return sprintf(buf, "%d\n", 0);
451
452 return sprintf(buf, "%d\n", rfkill->user_claim);
453} 462}
454 463
455static ssize_t rfkill_claim_store(struct device *dev, 464static ssize_t rfkill_claim_store(struct device *dev,
456 struct device_attribute *attr, 465 struct device_attribute *attr,
457 const char *buf, size_t count) 466 const char *buf, size_t count)
458{ 467{
459 struct rfkill *rfkill = to_rfkill(dev); 468 return -EOPNOTSUPP;
460 unsigned long claim_tmp;
461 bool claim;
462 int error;
463
464 if (!capable(CAP_NET_ADMIN))
465 return -EPERM;
466
467 if (rfkill->user_claim_unsupported)
468 return -EOPNOTSUPP;
469
470 error = strict_strtoul(buf, 0, &claim_tmp);
471 if (error)
472 return error;
473 claim = !!claim_tmp;
474
475 /*
476 * Take the global lock to make sure the kernel is not in
477 * the middle of rfkill_switch_all
478 */
479 error = mutex_lock_killable(&rfkill_global_mutex);
480 if (error)
481 return error;
482
483 if (rfkill->user_claim != claim) {
484 if (!claim && !rfkill_epo_lock_active) {
485 mutex_lock(&rfkill->mutex);
486 rfkill_toggle_radio(rfkill,
487 rfkill_global_states[rfkill->type].current_state,
488 0);
489 mutex_unlock(&rfkill->mutex);
490 }
491 rfkill->user_claim = claim;
492 }
493
494 mutex_unlock(&rfkill_global_mutex);
495
496 return error ? error : count;
497} 469}
498 470
499static struct device_attribute rfkill_dev_attrs[] = { 471static struct device_attribute rfkill_dev_attrs[] = {
@@ -559,6 +531,7 @@ static int rfkill_resume(struct device *dev)
559 1); 531 1);
560 532
561 mutex_unlock(&rfkill->mutex); 533 mutex_unlock(&rfkill->mutex);
534 rfkill_led_trigger(rfkill, rfkill->state);
562 } 535 }
563 536
564 return 0; 537 return 0;
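
The rfkill.c hunk adds an empty static inline rfkill_led_trigger() for builds without CONFIG_RFKILL_LEDS, so the new unconditional call sites compile in either configuration. A minimal sketch of that stub pattern (names hypothetical):

struct foo_dev;

#ifdef CONFIG_FOO_LEDS
void foo_led_update(struct foo_dev *dev);   /* real implementation elsewhere */
#else
static inline void foo_led_update(struct foo_dev *dev)
{
        /* feature disabled: call sites stay unconditional and the
         * compiler discards the empty body */
}
#endif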
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index cc29b44b1500..0f815cc6a3db 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -62,13 +62,7 @@ static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
62 62
63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) 63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
64{ 64{
65 if (!cgroup_lock_live_group(cgrp))
66 return -ENODEV;
67
68 cgrp_cls_state(cgrp)->classid = (u32) value; 65 cgrp_cls_state(cgrp)->classid = (u32) value;
69
70 cgroup_unlock();
71
72 return 0; 66 return 0;
73} 67}
74 68
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5f5efe4e6072..27d03816ec3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -196,6 +196,21 @@ void __qdisc_run(struct Qdisc *q)
196 clear_bit(__QDISC_STATE_RUNNING, &q->state); 196 clear_bit(__QDISC_STATE_RUNNING, &q->state);
197} 197}
198 198
199unsigned long dev_trans_start(struct net_device *dev)
200{
201 unsigned long val, res = dev->trans_start;
202 unsigned int i;
203
204 for (i = 0; i < dev->num_tx_queues; i++) {
205 val = netdev_get_tx_queue(dev, i)->trans_start;
206 if (val && time_after(val, res))
207 res = val;
208 }
209 dev->trans_start = res;
210 return res;
211}
212EXPORT_SYMBOL(dev_trans_start);
213
199static void dev_watchdog(unsigned long arg) 214static void dev_watchdog(unsigned long arg)
200{ 215{
201 struct net_device *dev = (struct net_device *)arg; 216 struct net_device *dev = (struct net_device *)arg;
@@ -205,25 +220,30 @@ static void dev_watchdog(unsigned long arg)
205 if (netif_device_present(dev) && 220 if (netif_device_present(dev) &&
206 netif_running(dev) && 221 netif_running(dev) &&
207 netif_carrier_ok(dev)) { 222 netif_carrier_ok(dev)) {
208 int some_queue_stopped = 0; 223 int some_queue_timedout = 0;
209 unsigned int i; 224 unsigned int i;
225 unsigned long trans_start;
210 226
211 for (i = 0; i < dev->num_tx_queues; i++) { 227 for (i = 0; i < dev->num_tx_queues; i++) {
212 struct netdev_queue *txq; 228 struct netdev_queue *txq;
213 229
214 txq = netdev_get_tx_queue(dev, i); 230 txq = netdev_get_tx_queue(dev, i);
215 if (netif_tx_queue_stopped(txq)) { 231 /*
216 some_queue_stopped = 1; 232 * old device drivers set dev->trans_start
233 */
234 trans_start = txq->trans_start ? : dev->trans_start;
235 if (netif_tx_queue_stopped(txq) &&
236 time_after(jiffies, (trans_start +
237 dev->watchdog_timeo))) {
238 some_queue_timedout = 1;
217 break; 239 break;
218 } 240 }
219 } 241 }
220 242
221 if (some_queue_stopped && 243 if (some_queue_timedout) {
222 time_after(jiffies, (dev->trans_start +
223 dev->watchdog_timeo))) {
224 char drivername[64]; 244 char drivername[64];
225 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 245 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
226 dev->name, netdev_drivername(dev, drivername, 64)); 246 dev->name, netdev_drivername(dev, drivername, 64), i);
227 dev->netdev_ops->ndo_tx_timeout(dev); 247 dev->netdev_ops->ndo_tx_timeout(dev);
228 } 248 }
229 if (!mod_timer(&dev->watchdog_timer, 249 if (!mod_timer(&dev->watchdog_timer,
@@ -602,8 +622,10 @@ static void transition_one_qdisc(struct net_device *dev,
602 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); 622 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
603 623
604 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); 624 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
605 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) 625 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
626 dev_queue->trans_start = 0;
606 *need_watchdog_p = 1; 627 *need_watchdog_p = 1;
628 }
607} 629}
608 630
609void dev_activate(struct net_device *dev) 631void dev_activate(struct net_device *dev)
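
With the watchdog above keyed to per-queue trans_start (falling back to dev->trans_start only for old drivers), multiqueue drivers are expected to stamp the queue they actually transmitted on, as the teql change further down does with txq_trans_update(). A minimal sketch of that in a driver's ndo_start_xmit (driver names hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: after handing the skb to hardware, refresh the per-queue
 * timestamp so dev_watchdog()/dev_trans_start() above see when this
 * particular queue last transmitted. */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq =
                netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* ... queue skb to hardware here ... */

        txq_trans_update(txq);          /* refreshes txq->trans_start */
        return NETDEV_TX_OK;
}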
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3b6418297231..a886496bdc3a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -58,7 +58,6 @@ struct teql_master
58 struct net_device *dev; 58 struct net_device *dev;
59 struct Qdisc *slaves; 59 struct Qdisc *slaves;
60 struct list_head master_list; 60 struct list_head master_list;
61 struct net_device_stats stats;
62}; 61};
63 62
64struct teql_sched_data 63struct teql_sched_data
@@ -272,6 +271,7 @@ static inline int teql_resolve(struct sk_buff *skb,
272static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 271static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
273{ 272{
274 struct teql_master *master = netdev_priv(dev); 273 struct teql_master *master = netdev_priv(dev);
274 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
275 struct Qdisc *start, *q; 275 struct Qdisc *start, *q;
276 int busy; 276 int busy;
277 int nores; 277 int nores;
@@ -308,11 +308,12 @@ restart:
308 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
309 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
310 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == 0) {
311 txq_trans_update(slave_txq);
311 __netif_tx_unlock(slave_txq); 312 __netif_tx_unlock(slave_txq);
312 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
313 netif_wake_queue(dev); 314 netif_wake_queue(dev);
314 master->stats.tx_packets++; 315 txq->tx_packets++;
315 master->stats.tx_bytes += length; 316 txq->tx_bytes += length;
316 return 0; 317 return 0;
317 } 318 }
318 __netif_tx_unlock(slave_txq); 319 __netif_tx_unlock(slave_txq);
@@ -339,10 +340,10 @@ restart:
339 netif_stop_queue(dev); 340 netif_stop_queue(dev);
340 return 1; 341 return 1;
341 } 342 }
342 master->stats.tx_errors++; 343 dev->stats.tx_errors++;
343 344
344drop: 345drop:
345 master->stats.tx_dropped++; 346 txq->tx_dropped++;
346 dev_kfree_skb(skb); 347 dev_kfree_skb(skb);
347 return 0; 348 return 0;
348} 349}
@@ -395,12 +396,6 @@ static int teql_master_close(struct net_device *dev)
395 return 0; 396 return 0;
396} 397}
397 398
398static struct net_device_stats *teql_master_stats(struct net_device *dev)
399{
400 struct teql_master *m = netdev_priv(dev);
401 return &m->stats;
402}
403
404static int teql_master_mtu(struct net_device *dev, int new_mtu) 399static int teql_master_mtu(struct net_device *dev, int new_mtu)
405{ 400{
406 struct teql_master *m = netdev_priv(dev); 401 struct teql_master *m = netdev_priv(dev);
@@ -425,7 +420,6 @@ static const struct net_device_ops teql_netdev_ops = {
425 .ndo_open = teql_master_open, 420 .ndo_open = teql_master_open,
426 .ndo_stop = teql_master_close, 421 .ndo_stop = teql_master_close,
427 .ndo_start_xmit = teql_master_xmit, 422 .ndo_start_xmit = teql_master_xmit,
428 .ndo_get_stats = teql_master_stats,
429 .ndo_change_mtu = teql_master_mtu, 423 .ndo_change_mtu = teql_master_mtu,
430}; 424};
431 425
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7d08f522ec84..f0c91df59d4e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -412,6 +412,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
412 412
413 /* Build the SCTP header. */ 413 /* Build the SCTP header. */
414 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); 414 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
415 skb_reset_transport_header(nskb);
415 sh->source = htons(packet->source_port); 416 sh->source = htons(packet->source_port);
416 sh->dest = htons(packet->destination_port); 417 sh->dest = htons(packet->destination_port);
417 418
@@ -527,15 +528,25 @@ int sctp_packet_transmit(struct sctp_packet *packet)
527 * Note: Adler-32 is no longer applicable, as has been replaced 528 * Note: Adler-32 is no longer applicable, as has been replaced
528 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 529 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
529 */ 530 */
530 if (!sctp_checksum_disable && !(dst->dev->features & NETIF_F_NO_CSUM)) { 531 if (!sctp_checksum_disable &&
532 !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
531 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 533 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
532 534
533 /* 3) Put the resultant value into the checksum field in the 535 /* 3) Put the resultant value into the checksum field in the
534 * common header, and leave the rest of the bits unchanged. 536 * common header, and leave the rest of the bits unchanged.
535 */ 537 */
536 sh->checksum = sctp_end_cksum(crc32); 538 sh->checksum = sctp_end_cksum(crc32);
537 } else 539 } else {
538 nskb->ip_summed = CHECKSUM_UNNECESSARY; 540 if (dst->dev->features & NETIF_F_SCTP_CSUM) {
541 /* no need to seed pseudo checksum for SCTP */
542 nskb->ip_summed = CHECKSUM_PARTIAL;
543 nskb->csum_start = (skb_transport_header(nskb) -
544 nskb->head);
545 nskb->csum_offset = offsetof(struct sctphdr, checksum);
546 } else {
547 nskb->ip_summed = CHECKSUM_UNNECESSARY;
548 }
549 }
539 550
540 /* IP layer ECN support 551 /* IP layer ECN support
541 * From RFC 2481 552 * From RFC 2481
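
The checksum hunk above adds NETIF_F_SCTP_CSUM handling: when the device can compute CRC32-C itself, the packet is marked CHECKSUM_PARTIAL and csum_start/csum_offset tell the driver where the checksum field lives. A minimal sketch of that offload marking for an arbitrary transport header, mirroring the new else-branch (helper name hypothetical):

#include <linux/skbuff.h>

/* Sketch: hand checksum computation to the device by recording where
 * the transport header starts and where, within it, the checksum
 * field sits.  csum_off is the offsetof() of the checksum member. */
static void mark_for_csum_offload(struct sk_buff *skb, unsigned int csum_off)
{
        skb->ip_summed   = CHECKSUM_PARTIAL;
        skb->csum_start  = skb_transport_header(skb) - skb->head;
        skb->csum_offset = csum_off;
}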
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index f72ba774c246..524ba5696d4d 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -167,7 +167,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
167 tb_ptr->mtu = dev->mtu; 167 tb_ptr->mtu = dev->mtu;
168 tb_ptr->blocked = 0; 168 tb_ptr->blocked = 0;
169 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH); 169 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
170 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN); 170 memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
171 return 0; 171 return 0;
172} 172}
173 173
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index c387217bb230..3c57005e44d1 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -68,7 +68,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
68 return 0; 68 return 0;
69} 69}
70 70
71static struct genl_family family = { 71static struct genl_family tipc_genl_family = {
72 .id = GENL_ID_GENERATE, 72 .id = GENL_ID_GENERATE,
73 .name = TIPC_GENL_NAME, 73 .name = TIPC_GENL_NAME,
74 .version = TIPC_GENL_VERSION, 74 .version = TIPC_GENL_VERSION,
@@ -76,39 +76,33 @@ static struct genl_family family = {
76 .maxattr = 0, 76 .maxattr = 0,
77}; 77};
78 78
79static struct genl_ops ops = { 79static struct genl_ops tipc_genl_ops = {
80 .cmd = TIPC_GENL_CMD, 80 .cmd = TIPC_GENL_CMD,
81 .doit = handle_cmd, 81 .doit = handle_cmd,
82}; 82};
83 83
84static int family_registered = 0; 84static int tipc_genl_family_registered;
85 85
86int tipc_netlink_start(void) 86int tipc_netlink_start(void)
87{ 87{
88 int res;
88 89
90 res = genl_register_family_with_ops(&tipc_genl_family,
91 &tipc_genl_ops, 1);
92 if (res) {
93 err("Failed to register netlink interface\n");
94 return res;
95 }
89 96
90 if (genl_register_family(&family)) 97 tipc_genl_family_registered = 1;
91 goto err;
92
93 family_registered = 1;
94
95 if (genl_register_ops(&family, &ops))
96 goto err_unregister;
97
98 return 0; 98 return 0;
99
100 err_unregister:
101 genl_unregister_family(&family);
102 family_registered = 0;
103 err:
104 err("Failed to register netlink interface\n");
105 return -EFAULT;
106} 99}
107 100
108void tipc_netlink_stop(void) 101void tipc_netlink_stop(void)
109{ 102{
110 if (family_registered) { 103 if (!tipc_genl_family_registered)
111 genl_unregister_family(&family); 104 return;
112 family_registered = 0; 105
113 } 106 genl_unregister_family(&tipc_genl_family);
107 tipc_genl_family_registered = 0;
114} 108}
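
The TIPC conversion above relies on genl_register_family_with_ops(), which registers the family and its ops array in one call and unregisters the family again if installing the ops fails, removing the need for a manual unwind path. A minimal sketch of the same pattern; family name, command number and handler are hypothetical:

#include <linux/init.h>
#include <net/genetlink.h>

/* Sketch: one-shot registration of a generic netlink family together
 * with its operations, mirroring the TIPC conversion above. */
static struct genl_family example_genl_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "example",
        .version = 1,
        .maxattr = 0,
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
        return 0;
}

static struct genl_ops example_genl_ops = {
        .cmd  = 1,
        .doit = example_doit,
};

static int __init example_netlink_init(void)
{
        return genl_register_family_with_ops(&example_genl_family,
                                             &example_genl_ops, 1);
}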
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
index 5b80b941c2c9..8f1510d0cc2b 100644
--- a/net/wimax/Makefile
+++ b/net/wimax/Makefile
@@ -6,6 +6,7 @@ wimax-y := \
6 op-msg.o \ 6 op-msg.o \
7 op-reset.o \ 7 op-reset.o \
8 op-rfkill.o \ 8 op-rfkill.o \
9 op-state-get.o \
9 stack.o 10 stack.o
10 11
11wimax-$(CONFIG_DEBUG_FS) += debugfs.o 12wimax-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
index 1c29123a3aa9..0975adba6b71 100644
--- a/net/wimax/debug-levels.h
+++ b/net/wimax/debug-levels.h
@@ -36,6 +36,7 @@ enum d_module {
36 D_SUBMODULE_DECLARE(op_msg), 36 D_SUBMODULE_DECLARE(op_msg),
37 D_SUBMODULE_DECLARE(op_reset), 37 D_SUBMODULE_DECLARE(op_reset),
38 D_SUBMODULE_DECLARE(op_rfkill), 38 D_SUBMODULE_DECLARE(op_rfkill),
39 D_SUBMODULE_DECLARE(op_state_get),
39 D_SUBMODULE_DECLARE(stack), 40 D_SUBMODULE_DECLARE(stack),
40}; 41};
41 42
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 94d216a46407..6c9bedb7431e 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -61,6 +61,7 @@ int wimax_debugfs_add(struct wimax_dev *wimax_dev)
61 __debugfs_register("wimax_dl_", op_msg, dentry); 61 __debugfs_register("wimax_dl_", op_msg, dentry);
62 __debugfs_register("wimax_dl_", op_reset, dentry); 62 __debugfs_register("wimax_dl_", op_reset, dentry);
63 __debugfs_register("wimax_dl_", op_rfkill, dentry); 63 __debugfs_register("wimax_dl_", op_rfkill, dentry);
64 __debugfs_register("wimax_dl_", op_state_get, dentry);
64 __debugfs_register("wimax_dl_", stack, dentry); 65 __debugfs_register("wimax_dl_", stack, dentry);
65 result = 0; 66 result = 0;
66out: 67out:
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 9ad4d893a566..d631a17186bc 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -108,6 +108,12 @@
108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as 108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
109 * wimax_msg_send() depends on skb->data being placed at the 109 * wimax_msg_send() depends on skb->data being placed at the
110 * beginning of the user message. 110 * beginning of the user message.
111 *
112 * Unlike other WiMAX stack calls, this call can be used way early,
113 * even before wimax_dev_add() is called, as long as the
114 * wimax_dev->net_dev pointer is set to point to a proper
115 * net_dev. This is so that drivers can use it early in case they need
116 * to send stuff around or communicate with user space.
111 */ 117 */
112struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, 118struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
113 const char *pipe_name, 119 const char *pipe_name,
@@ -115,7 +121,7 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
115 gfp_t gfp_flags) 121 gfp_t gfp_flags)
116{ 122{
117 int result; 123 int result;
118 struct device *dev = wimax_dev->net_dev->dev.parent; 124 struct device *dev = wimax_dev_to_dev(wimax_dev);
119 size_t msg_size; 125 size_t msg_size;
120 void *genl_msg; 126 void *genl_msg;
121 struct sk_buff *skb; 127 struct sk_buff *skb;
@@ -161,7 +167,6 @@ error_genlmsg_put:
161error_new: 167error_new:
162 nlmsg_free(skb); 168 nlmsg_free(skb);
163 return ERR_PTR(result); 169 return ERR_PTR(result);
164
165} 170}
166EXPORT_SYMBOL_GPL(wimax_msg_alloc); 171EXPORT_SYMBOL_GPL(wimax_msg_alloc);
167 172
@@ -256,10 +261,16 @@ EXPORT_SYMBOL_GPL(wimax_msg_len);
256 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as 261 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
257 * wimax_msg_send() depends on skb->data being placed at the 262 * wimax_msg_send() depends on skb->data being placed at the
258 * beginning of the user message. 263 * beginning of the user message.
264 *
265 * Unlike other WiMAX stack calls, this call can be used way early,
266 * even before wimax_dev_add() is called, as long as the
267 * wimax_dev->net_dev pointer is set to point to a proper
268 * net_dev. This is so that drivers can use it early in case they need
269 * to send stuff around or communicate with user space.
259 */ 270 */
260int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) 271int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
261{ 272{
262 struct device *dev = wimax_dev->net_dev->dev.parent; 273 struct device *dev = wimax_dev_to_dev(wimax_dev);
263 void *msg = skb->data; 274 void *msg = skb->data;
264 size_t size = skb->len; 275 size_t size = skb->len;
265 might_sleep(); 276 might_sleep();
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 2b75aee04217..a3616e2ccb8a 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -113,7 +113,7 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
113 if (state != wimax_dev->rf_hw) { 113 if (state != wimax_dev->rf_hw) {
114 wimax_dev->rf_hw = state; 114 wimax_dev->rf_hw = state;
115 rfkill_state = state == WIMAX_RF_ON ? 115 rfkill_state = state == WIMAX_RF_ON ?
116 RFKILL_STATE_OFF : RFKILL_STATE_ON; 116 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
117 if (wimax_dev->rf_hw == WIMAX_RF_ON 117 if (wimax_dev->rf_hw == WIMAX_RF_ON
118 && wimax_dev->rf_sw == WIMAX_RF_ON) 118 && wimax_dev->rf_sw == WIMAX_RF_ON)
119 wimax_state = WIMAX_ST_READY; 119 wimax_state = WIMAX_ST_READY;
@@ -259,10 +259,10 @@ int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
259 259
260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
261 switch (state) { 261 switch (state) {
262 case RFKILL_STATE_ON: 262 case RFKILL_STATE_SOFT_BLOCKED:
263 rf_state = WIMAX_RF_OFF; 263 rf_state = WIMAX_RF_OFF;
264 break; 264 break;
265 case RFKILL_STATE_OFF: 265 case RFKILL_STATE_UNBLOCKED:
266 rf_state = WIMAX_RF_ON; 266 rf_state = WIMAX_RF_ON;
267 break; 267 break;
268 default: 268 default:
@@ -361,10 +361,9 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
361 wimax_dev->rfkill = rfkill; 361 wimax_dev->rfkill = rfkill;
362 362
363 rfkill->name = wimax_dev->name; 363 rfkill->name = wimax_dev->name;
364 rfkill->state = RFKILL_STATE_OFF; 364 rfkill->state = RFKILL_STATE_UNBLOCKED;
365 rfkill->data = wimax_dev; 365 rfkill->data = wimax_dev;
366 rfkill->toggle_radio = wimax_rfkill_toggle_radio; 366 rfkill->toggle_radio = wimax_rfkill_toggle_radio;
367 rfkill->user_claim_unsupported = 1;
368 367
369 /* Initialize the input device for the hw key */ 368 /* Initialize the input device for the hw key */
370 input_dev = input_allocate_device(); 369 input_dev = input_allocate_device();
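The op-rfkill.c hunks track the rfkill rename from RFKILL_STATE_ON/RFKILL_STATE_OFF to RFKILL_STATE_SOFT_BLOCKED/RFKILL_STATE_UNBLOCKED. The standalone program below restates the resulting rfkill-to-WiMAX mapping that wimax_rfkill_toggle_radio() implements; the enums are local stand-ins so the example builds outside the kernel, not the real definitions.

/*
 * Standalone illustration (not kernel code) of the rfkill <-> WiMAX RF
 * state mapping after the rename.  The enums below are stand-ins so
 * the example compiles on its own; the real constants live in the
 * kernel headers.
 */
#include <stdio.h>

enum rfkill_state { RFKILL_STATE_SOFT_BLOCKED, RFKILL_STATE_UNBLOCKED };
enum wimax_rf_state { WIMAX_RF_OFF, WIMAX_RF_ON };

static int rfkill_to_wimax(enum rfkill_state state,
			   enum wimax_rf_state *rf_state)
{
	switch (state) {
	case RFKILL_STATE_SOFT_BLOCKED:	/* user asked for radio off */
		*rf_state = WIMAX_RF_OFF;
		return 0;
	case RFKILL_STATE_UNBLOCKED:	/* user asked for radio on */
		*rf_state = WIMAX_RF_ON;
		return 0;
	default:			/* other states not handled in this sketch */
		return -1;
	}
}

int main(void)
{
	enum wimax_rf_state rf;

	if (rfkill_to_wimax(RFKILL_STATE_UNBLOCKED, &rf) == 0)
		printf("unblocked -> WIMAX_RF_%s\n",
		       rf == WIMAX_RF_ON ? "ON" : "OFF");
	return 0;
}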
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
new file mode 100644
index 000000000000..a76b8fcb056d
--- /dev/null
+++ b/net/wimax/op-state-get.c
@@ -0,0 +1,86 @@
1/*
2 * Linux WiMAX
3 * Implement and export a method for getting a WiMAX device current state
4 *
5 * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
6 *
7 * Based on previous WiMAX core work by:
8 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
9 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License version
13 * 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * 02110-1301, USA.
24 */
25
26#include <net/wimax.h>
27#include <net/genetlink.h>
28#include <linux/wimax.h>
29#include <linux/security.h>
30#include "wimax-internal.h"
31
32#define D_SUBMODULE op_state_get
33#include "debug-levels.h"
34
35
36static const
37struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
38 [WIMAX_GNL_STGET_IFIDX] = {
39 .type = NLA_U32,
40 },
41};
42
43
44/*
45 * Exporting to user space over generic netlink
46 *
47 * Parse the state get command from user space, return a combination
48 * value that describes the current state.
49 *
50 * No attributes.
51 */
52static
53int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
54{
55 int result, ifindex;
56 struct wimax_dev *wimax_dev;
57 struct device *dev;
58
59 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
60 result = -ENODEV;
61 if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
62 printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX "
63 "attribute\n");
64 goto error_no_wimax_dev;
65 }
66 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
67 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
68 if (wimax_dev == NULL)
69 goto error_no_wimax_dev;
70 dev = wimax_dev_to_dev(wimax_dev);
71 /* Execute the operation and send the result back to user space */
72 result = wimax_state_get(wimax_dev);
73 dev_put(wimax_dev->net_dev);
74error_no_wimax_dev:
75 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
76 return result;
77}
78
79
80struct genl_ops wimax_gnl_state_get = {
81 .cmd = WIMAX_GNL_OP_STATE_GET,
82 .flags = GENL_ADMIN_PERM,
83 .policy = wimax_gnl_state_get_policy,
84 .doit = wimax_gnl_doit_state_get,
85 .dumpit = NULL,
86};
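op-state-get.c follows the usual generic netlink recipe: an nla_policy describing the accepted attributes, plus a doit handler that checks for the mandatory interface-index attribute, fetches it with nla_get_u32(), resolves the device and runs the operation. A stripped-down skeleton of that recipe is sketched below with hypothetical example_* names; only calls that appear in the new file are used.

/*
 * Hedged skeleton of the policy + doit pattern used above.  All names
 * prefixed with "example_" are hypothetical; the info->attrs[] lookup
 * and nla_get_u32() are the same calls the new op uses.
 */
#include <net/genetlink.h>

enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_IFIDX, __EXAMPLE_ATTR_MAX };
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_IFIDX] = { .type = NLA_U32 },
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 ifindex;

	/* reject requests that do not carry the mandatory attribute */
	if (info->attrs[EXAMPLE_ATTR_IFIDX] == NULL)
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[EXAMPLE_ATTR_IFIDX]);

	/* ... look up the device by ifindex, run the op, drop the ref ... */
	return 0;
}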
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 933e1422b09f..79fb7d7c640f 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -402,13 +402,15 @@ EXPORT_SYMBOL_GPL(wimax_dev_init);
402extern struct genl_ops 402extern struct genl_ops
403 wimax_gnl_msg_from_user, 403 wimax_gnl_msg_from_user,
404 wimax_gnl_reset, 404 wimax_gnl_reset,
405 wimax_gnl_rfkill; 405 wimax_gnl_rfkill,
406 wimax_gnl_state_get;
406 407
407static 408static
408struct genl_ops *wimax_gnl_ops[] = { 409struct genl_ops *wimax_gnl_ops[] = {
409 &wimax_gnl_msg_from_user, 410 &wimax_gnl_msg_from_user,
410 &wimax_gnl_reset, 411 &wimax_gnl_reset,
411 &wimax_gnl_rfkill, 412 &wimax_gnl_rfkill,
413 &wimax_gnl_state_get,
412}; 414};
413 415
414 416
@@ -533,6 +535,7 @@ struct d_level D_LEVEL[] = {
533 D_SUBMODULE_DEFINE(op_msg), 535 D_SUBMODULE_DEFINE(op_msg),
534 D_SUBMODULE_DEFINE(op_reset), 536 D_SUBMODULE_DEFINE(op_reset),
535 D_SUBMODULE_DEFINE(op_rfkill), 537 D_SUBMODULE_DEFINE(op_rfkill),
538 D_SUBMODULE_DEFINE(op_state_get),
536 D_SUBMODULE_DEFINE(stack), 539 D_SUBMODULE_DEFINE(stack),
537}; 540};
538size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); 541size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 3c3bc9e579ed..45005497c634 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -10,6 +10,14 @@ config CFG80211_REG_DEBUG
10 10
11 If unsure, say N. 11 If unsure, say N.
12 12
13config CFG80211_DEBUGFS
14 bool "cfg80211 DebugFS entries"
15 depends on CFG80211 && DEBUG_FS
16 ---help---
17	  You can enable this if you want debugfs entries for cfg80211.
18
19 If unsure, say N.
20
13config WIRELESS_OLD_REGULATORY 21config WIRELESS_OLD_REGULATORY
14 bool "Old wireless static regulatory definitions" 22 bool "Old wireless static regulatory definitions"
15 default n 23 default n
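CFG80211_DEBUGFS is a bool that is only offered when both CFG80211 and DEBUG_FS are set, so a kernel configuration that wants the new cfg80211 debugfs entries needs something along these lines (the tristate value chosen for CFG80211 here is illustrative):

# illustrative .config fragment; exact values depend on the rest of the config
CONFIG_CFG80211=m
CONFIG_DEBUG_FS=y
CONFIG_CFG80211_DEBUGFS=y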
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 6d1e7b27b752..f78c4832a9ca 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o 5obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o
9cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o 10cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
10 11
11ccflags-y += -D__CHECK_ENDIAN__ 12ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d1f556535f6d..a5dbea1da476 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -14,10 +14,10 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <net/genetlink.h> 15#include <net/genetlink.h>
16#include <net/cfg80211.h> 16#include <net/cfg80211.h>
17#include <net/wireless.h>
18#include "nl80211.h" 17#include "nl80211.h"
19#include "core.h" 18#include "core.h"
20#include "sysfs.h" 19#include "sysfs.h"
20#include "debugfs.h"
21 21
22/* name for sysfs, %d is appended */ 22/* name for sysfs, %d is appended */
23#define PHY_NAME "phy" 23#define PHY_NAME "phy"
@@ -229,7 +229,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
229 229
230/* exported functions */ 230/* exported functions */
231 231
232struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) 232struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
233{ 233{
234 static int wiphy_counter; 234 static int wiphy_counter;
235 235
@@ -274,6 +274,16 @@ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
274 drv->wiphy.dev.class = &ieee80211_class; 274 drv->wiphy.dev.class = &ieee80211_class;
275 drv->wiphy.dev.platform_data = drv; 275 drv->wiphy.dev.platform_data = drv;
276 276
277 /*
278 * Initialize wiphy parameters to IEEE 802.11 MIB default values.
279 * Fragmentation and RTS threshold are disabled by default with the
280 * special -1 value.
281 */
282 drv->wiphy.retry_short = 7;
283 drv->wiphy.retry_long = 4;
284 drv->wiphy.frag_threshold = (u32) -1;
285 drv->wiphy.rts_threshold = (u32) -1;
286
277 return &drv->wiphy; 287 return &drv->wiphy;
278} 288}
279EXPORT_SYMBOL(wiphy_new); 289EXPORT_SYMBOL(wiphy_new);
@@ -366,6 +376,8 @@ int wiphy_register(struct wiphy *wiphy)
366 nl80211_send_reg_change_event(&request); 376 nl80211_send_reg_change_event(&request);
367 } 377 }
368 378
379 cfg80211_debugfs_drv_add(drv);
380
369 res = 0; 381 res = 0;
370out_unlock: 382out_unlock:
371 mutex_unlock(&cfg80211_mutex); 383 mutex_unlock(&cfg80211_mutex);
@@ -396,6 +408,8 @@ void wiphy_unregister(struct wiphy *wiphy)
396 /* unlock again before freeing */ 408 /* unlock again before freeing */
397 mutex_unlock(&drv->mtx); 409 mutex_unlock(&drv->mtx);
398 410
411 cfg80211_debugfs_drv_del(drv);
412
399 /* If this device got a regulatory hint tell core its 413 /* If this device got a regulatory hint tell core its
400 * free to listen now to a new shiny device regulatory hint */ 414 * free to listen now to a new shiny device regulatory hint */
401 reg_device_remove(wiphy); 415 reg_device_remove(wiphy);
@@ -448,8 +462,28 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
448 "symlink to netdev!\n"); 462 "symlink to netdev!\n");
449 } 463 }
450 dev->ieee80211_ptr->netdev = dev; 464 dev->ieee80211_ptr->netdev = dev;
465#ifdef CONFIG_WIRELESS_EXT
466 dev->ieee80211_ptr->wext.default_key = -1;
467 dev->ieee80211_ptr->wext.default_mgmt_key = -1;
468#endif
451 mutex_unlock(&rdev->devlist_mtx); 469 mutex_unlock(&rdev->devlist_mtx);
452 break; 470 break;
471 case NETDEV_GOING_DOWN:
472 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
473 break;
474 if (!dev->ieee80211_ptr->ssid_len)
475 break;
476 cfg80211_leave_ibss(rdev, dev, true);
477 break;
478 case NETDEV_UP:
479#ifdef CONFIG_WIRELESS_EXT
480 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
481 break;
482 if (!dev->ieee80211_ptr->wext.ibss.ssid_len)
483 break;
484 cfg80211_join_ibss(rdev, dev, &dev->ieee80211_ptr->wext.ibss);
485 break;
486#endif
453 case NETDEV_UNREGISTER: 487 case NETDEV_UNREGISTER:
454 mutex_lock(&rdev->devlist_mtx); 488 mutex_lock(&rdev->devlist_mtx);
455 if (!list_empty(&dev->ieee80211_ptr->list)) { 489 if (!list_empty(&dev->ieee80211_ptr->list)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 0a592e4295f0..ab512bcd8153 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Wireless configuration interface internals. 2 * Wireless configuration interface internals.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#ifndef __NET_WIRELESS_CORE_H 6#ifndef __NET_WIRELESS_CORE_H
7#define __NET_WIRELESS_CORE_H 7#define __NET_WIRELESS_CORE_H
@@ -10,14 +10,13 @@
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h> 11#include <linux/kref.h>
12#include <linux/rbtree.h> 12#include <linux/rbtree.h>
13#include <linux/mutex.h> 13#include <linux/debugfs.h>
14#include <net/genetlink.h> 14#include <net/genetlink.h>
15#include <net/wireless.h>
16#include <net/cfg80211.h> 15#include <net/cfg80211.h>
17#include "reg.h" 16#include "reg.h"
18 17
19struct cfg80211_registered_device { 18struct cfg80211_registered_device {
20 struct cfg80211_ops *ops; 19 const struct cfg80211_ops *ops;
21 struct list_head list; 20 struct list_head list;
22 /* we hold this mutex during any call so that 21 /* we hold this mutex during any call so that
23 * we cannot do multiple calls at once, and also 22 * we cannot do multiple calls at once, and also
@@ -52,6 +51,17 @@ struct cfg80211_registered_device {
52 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 51 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
53 unsigned long suspend_at; 52 unsigned long suspend_at;
54 53
54#ifdef CONFIG_CFG80211_DEBUGFS
55 /* Debugfs entries */
56 struct wiphy_debugfsdentries {
57 struct dentry *rts_threshold;
58 struct dentry *fragmentation_threshold;
59 struct dentry *short_retry_limit;
60 struct dentry *long_retry_limit;
61 struct dentry *ht40allow_map;
62 } debugfs;
63#endif
64
55 /* must be last because of the way we do wiphy_priv(), 65 /* must be last because of the way we do wiphy_priv(),
56 * and it should at least be aligned to NETDEV_ALIGN */ 66 * and it should at least be aligned to NETDEV_ALIGN */
57 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 67 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -74,10 +84,7 @@ bool wiphy_idx_valid(int wiphy_idx)
74extern struct mutex cfg80211_mutex; 84extern struct mutex cfg80211_mutex;
75extern struct list_head cfg80211_drv_list; 85extern struct list_head cfg80211_drv_list;
76 86
77static inline void assert_cfg80211_lock(void) 87#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
78{
79 WARN_ON(!mutex_is_locked(&cfg80211_mutex));
80}
81 88
82/* 89/*
83 * You can use this to mark a wiphy_idx as not having an associated wiphy. 90 * You can use this to mark a wiphy_idx as not having an associated wiphy.
@@ -148,4 +155,16 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
148void cfg80211_bss_age(struct cfg80211_registered_device *dev, 155void cfg80211_bss_age(struct cfg80211_registered_device *dev,
149 unsigned long age_secs); 156 unsigned long age_secs);
150 157
158/* IBSS */
159int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
160 struct net_device *dev,
161 struct cfg80211_ibss_params *params);
162void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
163int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
164 struct net_device *dev, bool nowext);
165
166/* internal helpers */
167int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
168 const u8 *mac_addr);
169
151#endif /* __NET_WIRELESS_CORE_H */ 170#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
new file mode 100644
index 000000000000..679ddfcec1ee
--- /dev/null
+++ b/net/wireless/debugfs.c
@@ -0,0 +1,131 @@
1/*
2 * cfg80211 debugfs
3 *
4 * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include "core.h"
13#include "debugfs.h"
14
15static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
16{
17 file->private_data = inode->i_private;
18 return 0;
19}
20
21#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
22static ssize_t name## _read(struct file *file, char __user *userbuf, \
23 size_t count, loff_t *ppos) \
24{ \
25	struct wiphy *wiphy = file->private_data;		\
26 char buf[buflen]; \
27 int res; \
28 \
29 res = scnprintf(buf, buflen, fmt "\n", ##value); \
30 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
31} \
32 \
33static const struct file_operations name## _ops = { \
34 .read = name## _read, \
35 .open = cfg80211_open_file_generic, \
36};
37
38DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
39 wiphy->rts_threshold)
40DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
41 wiphy->frag_threshold);
42DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
43 wiphy->retry_short)
44DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
45 wiphy->retry_long);
46
47static int ht_print_chan(struct ieee80211_channel *chan,
48 char *buf, int buf_size, int offset)
49{
50 if (WARN_ON(offset > buf_size))
51 return 0;
52
53 if (chan->flags & IEEE80211_CHAN_DISABLED)
54 return snprintf(buf + offset,
55 buf_size - offset,
56 "%d Disabled\n",
57 chan->center_freq);
58
59 return snprintf(buf + offset,
60 buf_size - offset,
61 "%d HT40 %c%c\n",
62 chan->center_freq,
63 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
64 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
65}
66
67static ssize_t ht40allow_map_read(struct file *file,
68 char __user *user_buf,
69 size_t count, loff_t *ppos)
70{
71 struct wiphy *wiphy = file->private_data;
72 char *buf;
73 unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
74 enum ieee80211_band band;
75 struct ieee80211_supported_band *sband;
76
77 buf = kzalloc(buf_size, GFP_KERNEL);
78 if (!buf)
79 return -ENOMEM;
80
81 mutex_lock(&cfg80211_mutex);
82
83 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
84 sband = wiphy->bands[band];
85 if (!sband)
86 continue;
87 for (i = 0; i < sband->n_channels; i++)
88 offset += ht_print_chan(&sband->channels[i],
89 buf, buf_size, offset);
90 }
91
92 mutex_unlock(&cfg80211_mutex);
93
94 r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
95
96 kfree(buf);
97
98 return r;
99}
100
101static const struct file_operations ht40allow_map_ops = {
102 .read = ht40allow_map_read,
103 .open = cfg80211_open_file_generic,
104};
105
106#define DEBUGFS_ADD(name) \
107 drv->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
108 &drv->wiphy, &name## _ops);
109#define DEBUGFS_DEL(name) \
110 debugfs_remove(drv->debugfs.name); \
111 drv->debugfs.name = NULL;
112
113void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv)
114{
115 struct dentry *phyd = drv->wiphy.debugfsdir;
116
117 DEBUGFS_ADD(rts_threshold);
118 DEBUGFS_ADD(fragmentation_threshold);
119 DEBUGFS_ADD(short_retry_limit);
120 DEBUGFS_ADD(long_retry_limit);
121 DEBUGFS_ADD(ht40allow_map);
122}
123
124void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv)
125{
126 DEBUGFS_DEL(rts_threshold);
127 DEBUGFS_DEL(fragmentation_threshold);
128 DEBUGFS_DEL(short_retry_limit);
129 DEBUGFS_DEL(long_retry_limit);
130 DEBUGFS_DEL(ht40allow_map);
131}
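DEBUGFS_READONLY_FILE() stamps out one read-only debugfs file per wiphy field: a read() handler that formats the value into a small stack buffer, and a matching file_operations. Hand-expanding the first invocation makes the generated code explicit (expansion done by hand for illustration; the real code comes from the macro above).

/*
 * Hand expansion of DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
 * wiphy->rts_threshold), for illustration only.
 */
static ssize_t rts_threshold_read(struct file *file, char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;
	char buf[20];
	int res;

	res = scnprintf(buf, 20, "%d\n", wiphy->rts_threshold);
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

static const struct file_operations rts_threshold_ops = {
	.read = rts_threshold_read,
	.open = cfg80211_open_file_generic,
};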
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
new file mode 100644
index 000000000000..c226983ae66b
--- /dev/null
+++ b/net/wireless/debugfs.h
@@ -0,0 +1,14 @@
1#ifndef __CFG80211_DEBUGFS_H
2#define __CFG80211_DEBUGFS_H
3
4#ifdef CONFIG_CFG80211_DEBUGFS
5void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv);
6void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv);
7#else
8static inline
9void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) {}
10static inline
11void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) {}
12#endif
13
14#endif /* __CFG80211_DEBUGFS_H */
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
new file mode 100644
index 000000000000..a4a1c3498ff2
--- /dev/null
+++ b/net/wireless/ibss.c
@@ -0,0 +1,369 @@
1/*
2 * Some IBSS support code for cfg80211.
3 *
4 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
5 */
6
7#include <linux/etherdevice.h>
8#include <linux/if_arp.h>
9#include <net/cfg80211.h>
10#include "nl80211.h"
11
12
13void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
14{
15 struct wireless_dev *wdev = dev->ieee80211_ptr;
16 struct cfg80211_bss *bss;
17#ifdef CONFIG_WIRELESS_EXT
18 union iwreq_data wrqu;
19#endif
20
21 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
22 return;
23
24 if (WARN_ON(!wdev->ssid_len))
25 return;
26
27 if (memcmp(bssid, wdev->bssid, ETH_ALEN) == 0)
28 return;
29
30 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
31 wdev->ssid, wdev->ssid_len,
32 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
33
34 if (WARN_ON(!bss))
35 return;
36
37 if (wdev->current_bss) {
38 cfg80211_unhold_bss(wdev->current_bss);
39 cfg80211_put_bss(wdev->current_bss);
40 }
41
42 cfg80211_hold_bss(bss);
43 wdev->current_bss = bss;
44 memcpy(wdev->bssid, bssid, ETH_ALEN);
45
46 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, gfp);
47#ifdef CONFIG_WIRELESS_EXT
48 memset(&wrqu, 0, sizeof(wrqu));
49 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
50 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
51#endif
52}
53EXPORT_SYMBOL(cfg80211_ibss_joined);
54
55int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
56 struct net_device *dev,
57 struct cfg80211_ibss_params *params)
58{
59 struct wireless_dev *wdev = dev->ieee80211_ptr;
60 int err;
61
62 if (wdev->ssid_len)
63 return -EALREADY;
64
65#ifdef CONFIG_WIRELESS_EXT
66 wdev->wext.ibss.channel = params->channel;
67#endif
68 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
69
70 if (err)
71 return err;
72
73 memcpy(wdev->ssid, params->ssid, params->ssid_len);
74 wdev->ssid_len = params->ssid_len;
75
76 return 0;
77}
78
79void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
80{
81 struct wireless_dev *wdev = dev->ieee80211_ptr;
82
83 if (wdev->current_bss) {
84 cfg80211_unhold_bss(wdev->current_bss);
85 cfg80211_put_bss(wdev->current_bss);
86 }
87
88 wdev->current_bss = NULL;
89 wdev->ssid_len = 0;
90 memset(wdev->bssid, 0, ETH_ALEN);
91#ifdef CONFIG_WIRELESS_EXT
92 if (!nowext)
93 wdev->wext.ibss.ssid_len = 0;
94#endif
95}
96
97int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
98 struct net_device *dev, bool nowext)
99{
100 int err;
101
102 err = rdev->ops->leave_ibss(&rdev->wiphy, dev);
103
104 if (err)
105 return err;
106
107 cfg80211_clear_ibss(dev, nowext);
108
109 return 0;
110}
111
112#ifdef CONFIG_WIRELESS_EXT
113static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
114 struct wireless_dev *wdev)
115{
116 enum ieee80211_band band;
117 int i;
118
119 if (!wdev->wext.ibss.beacon_interval)
120 wdev->wext.ibss.beacon_interval = 100;
121
122 /* try to find an IBSS channel if none requested ... */
123 if (!wdev->wext.ibss.channel) {
124 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
125 struct ieee80211_supported_band *sband;
126 struct ieee80211_channel *chan;
127
128 sband = rdev->wiphy.bands[band];
129 if (!sband)
130 continue;
131
132 for (i = 0; i < sband->n_channels; i++) {
133 chan = &sband->channels[i];
134 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
135 continue;
136 if (chan->flags & IEEE80211_CHAN_DISABLED)
137 continue;
138 wdev->wext.ibss.channel = chan;
139 break;
140 }
141
142 if (wdev->wext.ibss.channel)
143 break;
144 }
145
146 if (!wdev->wext.ibss.channel)
147 return -EINVAL;
148 }
149
150 /* don't join -- SSID is not there */
151 if (!wdev->wext.ibss.ssid_len)
152 return 0;
153
154 if (!netif_running(wdev->netdev))
155 return 0;
156
157 return cfg80211_join_ibss(wiphy_to_dev(wdev->wiphy),
158 wdev->netdev, &wdev->wext.ibss);
159}
160
161int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
162 struct iw_request_info *info,
163 struct iw_freq *freq, char *extra)
164{
165 struct wireless_dev *wdev = dev->ieee80211_ptr;
166 struct ieee80211_channel *chan;
167 int err;
168
169 /* call only for ibss! */
170 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
171 return -EINVAL;
172
173 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
174 return -EOPNOTSUPP;
175
176 chan = cfg80211_wext_freq(wdev->wiphy, freq);
177 if (chan && IS_ERR(chan))
178 return PTR_ERR(chan);
179
180 if (chan &&
181 (chan->flags & IEEE80211_CHAN_NO_IBSS ||
182 chan->flags & IEEE80211_CHAN_DISABLED))
183 return -EINVAL;
184
185 if (wdev->wext.ibss.channel == chan)
186 return 0;
187
188 if (wdev->ssid_len) {
189 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
190 dev, true);
191 if (err)
192 return err;
193 }
194
195 if (chan) {
196 wdev->wext.ibss.channel = chan;
197 wdev->wext.ibss.channel_fixed = true;
198 } else {
199 /* cfg80211_ibss_wext_join will pick one if needed */
200 wdev->wext.ibss.channel_fixed = false;
201 }
202
203 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
204}
205/* temporary symbol - mark GPL - in the future the handler won't be */
206EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwfreq);
207
208int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
209 struct iw_request_info *info,
210 struct iw_freq *freq, char *extra)
211{
212 struct wireless_dev *wdev = dev->ieee80211_ptr;
213 struct ieee80211_channel *chan = NULL;
214
215 /* call only for ibss! */
216 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
217 return -EINVAL;
218
219 if (wdev->current_bss)
220 chan = wdev->current_bss->channel;
221 else if (wdev->wext.ibss.channel)
222 chan = wdev->wext.ibss.channel;
223
224 if (chan) {
225 freq->m = chan->center_freq;
226 freq->e = 6;
227 return 0;
228 }
229
230 /* no channel if not joining */
231 return -EINVAL;
232}
233/* temporary symbol - mark GPL - in the future the handler won't be */
234EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwfreq);
235
236int cfg80211_ibss_wext_siwessid(struct net_device *dev,
237 struct iw_request_info *info,
238 struct iw_point *data, char *ssid)
239{
240 struct wireless_dev *wdev = dev->ieee80211_ptr;
241 size_t len = data->length;
242 int err;
243
244 /* call only for ibss! */
245 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
246 return -EINVAL;
247
248 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
249 return -EOPNOTSUPP;
250
251 if (wdev->ssid_len) {
252 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
253 dev, true);
254 if (err)
255 return err;
256 }
257
258 /* iwconfig uses nul termination in SSID.. */
259 if (len > 0 && ssid[len - 1] == '\0')
260 len--;
261
262 wdev->wext.ibss.ssid = wdev->ssid;
263 memcpy(wdev->wext.ibss.ssid, ssid, len);
264 wdev->wext.ibss.ssid_len = len;
265
266 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
267}
268/* temporary symbol - mark GPL - in the future the handler won't be */
269EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwessid);
270
271int cfg80211_ibss_wext_giwessid(struct net_device *dev,
272 struct iw_request_info *info,
273 struct iw_point *data, char *ssid)
274{
275 struct wireless_dev *wdev = dev->ieee80211_ptr;
276
277 /* call only for ibss! */
278 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
279 return -EINVAL;
280
281 data->flags = 0;
282
283 if (wdev->ssid_len) {
284 data->flags = 1;
285 data->length = wdev->ssid_len;
286 memcpy(ssid, wdev->ssid, data->length);
287 } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) {
288 data->flags = 1;
289 data->length = wdev->wext.ibss.ssid_len;
290 memcpy(ssid, wdev->wext.ibss.ssid, data->length);
291 }
292
293 return 0;
294}
295/* temporary symbol - mark GPL - in the future the handler won't be */
296EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwessid);
297
298int cfg80211_ibss_wext_siwap(struct net_device *dev,
299 struct iw_request_info *info,
300 struct sockaddr *ap_addr, char *extra)
301{
302 struct wireless_dev *wdev = dev->ieee80211_ptr;
303 u8 *bssid = ap_addr->sa_data;
304 int err;
305
306 /* call only for ibss! */
307 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
308 return -EINVAL;
309
310 if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
311 return -EOPNOTSUPP;
312
313 if (ap_addr->sa_family != ARPHRD_ETHER)
314 return -EINVAL;
315
316 /* automatic mode */
317 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
318 bssid = NULL;
319
320 /* both automatic */
321 if (!bssid && !wdev->wext.ibss.bssid)
322 return 0;
323
324 /* fixed already - and no change */
325 if (wdev->wext.ibss.bssid && bssid &&
326 compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
327 return 0;
328
329 if (wdev->ssid_len) {
330 err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
331 dev, true);
332 if (err)
333 return err;
334 }
335
336 if (bssid) {
337 memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
338 wdev->wext.ibss.bssid = wdev->wext.bssid;
339 } else
340 wdev->wext.ibss.bssid = NULL;
341
342 return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
343}
344/* temporary symbol - mark GPL - in the future the handler won't be */
345EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwap);
346
347int cfg80211_ibss_wext_giwap(struct net_device *dev,
348 struct iw_request_info *info,
349 struct sockaddr *ap_addr, char *extra)
350{
351 struct wireless_dev *wdev = dev->ieee80211_ptr;
352
353 /* call only for ibss! */
354 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
355 return -EINVAL;
356
357 ap_addr->sa_family = ARPHRD_ETHER;
358
359 if (wdev->wext.ibss.bssid) {
360 memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
361 return 0;
362 }
363
364 memcpy(ap_addr->sa_data, wdev->bssid, ETH_ALEN);
365 return 0;
366}
367/* temporary symbol - mark GPL - in the future the handler won't be */
368EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwap);
369#endif
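cfg80211_ibss_joined() is the notification a driver raises once the IBSS has settled on a BSSID; it resolves the matching BSS entry, updates wdev->current_bss, and reports the event over nl80211 and, when built in, wireless extensions. A hedged sketch of the driver side follows; the example_vif structure and calling context are assumptions, while the exported call and its arguments come from the file above.

/*
 * Hedged sketch of the driver side: notify cfg80211 once the IBSS has
 * settled on a BSSID.  "struct example_vif" and the locking context
 * are assumptions; cfg80211_ibss_joined() is the export added above.
 */
#include <linux/if_ether.h>
#include <net/cfg80211.h>

struct example_vif {
	struct net_device *netdev;
	u8 bssid[ETH_ALEN];
};

static void example_ibss_merge_done(struct example_vif *vif)
{
	/* called from a context that may sleep, hence GFP_KERNEL */
	cfg80211_ibss_joined(vif->netdev, vif->bssid, GFP_KERNEL);
}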
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bec5721b6f99..42184361a109 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -28,19 +28,55 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
28} 28}
29EXPORT_SYMBOL(cfg80211_send_rx_assoc); 29EXPORT_SYMBOL(cfg80211_send_rx_assoc);
30 30
31void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, size_t len) 31void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
32{ 32{
33 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 33 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
34 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 34 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
35 nl80211_send_rx_deauth(rdev, dev, buf, len); 35 nl80211_send_deauth(rdev, dev, buf, len);
36} 36}
37EXPORT_SYMBOL(cfg80211_send_rx_deauth); 37EXPORT_SYMBOL(cfg80211_send_deauth);
38 38
39void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf, 39void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
40 size_t len)
41{ 40{
42 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 41 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
43 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 42 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
44 nl80211_send_rx_disassoc(rdev, dev, buf, len); 43 nl80211_send_disassoc(rdev, dev, buf, len);
45} 44}
46EXPORT_SYMBOL(cfg80211_send_rx_disassoc); 45EXPORT_SYMBOL(cfg80211_send_disassoc);
46
47static void cfg80211_wext_disconnected(struct net_device *dev)
48{
49#ifdef CONFIG_WIRELESS_EXT
50 union iwreq_data wrqu;
51 memset(&wrqu, 0, sizeof(wrqu));
52 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
53#endif
54}
55
56void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
57{
58 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
59 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
60 nl80211_send_auth_timeout(rdev, dev, addr);
61 cfg80211_wext_disconnected(dev);
62}
63EXPORT_SYMBOL(cfg80211_send_auth_timeout);
64
65void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
66{
67 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
68 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
69 nl80211_send_assoc_timeout(rdev, dev, addr);
70 cfg80211_wext_disconnected(dev);
71}
72EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
73
74void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
75 enum nl80211_key_type key_type, int key_id,
76 const u8 *tsc)
77{
78 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
79 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
80 nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc);
81}
82EXPORT_SYMBOL(cfg80211_michael_mic_failure);
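The new mlme.c exports give drivers a single call per MLME event. Below is a hedged sketch of a driver forwarding an authentication timeout and a Michael MIC failure; the wrapper functions are hypothetical, and the key-type constant is the existing nl80211 enum value, chosen here as an assumption about the caller's situation rather than anything mandated by the hunks.

/*
 * Hedged sketch of driver-side MLME event reporting using the exports
 * added above; the calling context and key data are assumptions.
 */
#include <net/cfg80211.h>

static void example_report_auth_timeout(struct net_device *dev,
					const u8 *peer_addr)
{
	/* also clears the wext BSSID for wireless-extensions userspace */
	cfg80211_send_auth_timeout(dev, peer_addr);
}

static void example_report_mic_failure(struct net_device *dev,
				       const u8 *peer_addr, int key_id,
				       const u8 *tsc)
{
	/* pairwise key in this sketch; group keys use NL80211_KEYTYPE_GROUP */
	cfg80211_michael_mic_failure(dev, peer_addr,
				     NL80211_KEYTYPE_PAIRWISE, key_id, tsc);
}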
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2456e4ee445e..4b4d3c8a1aed 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the new netlink-based wireless configuration interface. 2 * This is the new netlink-based wireless configuration interface.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -57,10 +57,14 @@ static int get_drv_dev_by_info_ifindex(struct nlattr **attrs,
57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
60 .len = BUS_ID_SIZE-1 }, 60 .len = 20-1 },
61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, 61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED },
62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, 62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, 63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
64 [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
65 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
66 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
67 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
64 68
65 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 69 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
66 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 70 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -73,6 +77,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
73 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, 77 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
74 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, 78 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
75 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, 79 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
80 [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
76 81
77 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, 82 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
78 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, 83 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
@@ -116,8 +121,45 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
116 .len = IEEE80211_MAX_SSID_LEN }, 121 .len = IEEE80211_MAX_SSID_LEN },
117 [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, 122 [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 },
118 [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, 123 [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
124 [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG },
125 [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG },
126 [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 },
127 [NL80211_ATTR_STA_FLAGS2] = {
128 .len = sizeof(struct nl80211_sta_flag_update),
129 },
130 [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
119}; 131};
120 132
133/* IE validation */
134static bool is_valid_ie_attr(const struct nlattr *attr)
135{
136 const u8 *pos;
137 int len;
138
139 if (!attr)
140 return true;
141
142 pos = nla_data(attr);
143 len = nla_len(attr);
144
145 while (len) {
146 u8 elemlen;
147
148 if (len < 2)
149 return false;
150 len -= 2;
151
152 elemlen = pos[1];
153 if (elemlen > len)
154 return false;
155
156 len -= elemlen;
157 pos += 2 + elemlen;
158 }
159
160 return true;
161}
162
121/* message building helper */ 163/* message building helper */
122static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, 164static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
123 int flags, u8 cmd) 165 int flags, u8 cmd)
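is_valid_ie_attr() treats the attribute payload as a sequence of information elements, each a one-byte ID, a one-byte length and that many bytes of data, and rejects anything whose declared length runs past the end of the buffer. The standalone program below reproduces the same walk on a well-formed and a truncated element.

/*
 * Standalone demonstration (plain userspace C) of the information
 * element walk that is_valid_ie_attr() performs: id (1 byte),
 * length (1 byte), then "length" bytes of data.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool ies_are_valid(const uint8_t *pos, size_t len)
{
	while (len) {
		uint8_t elemlen;

		if (len < 2)		/* no room for id + length */
			return false;
		len -= 2;

		elemlen = pos[1];
		if (elemlen > len)	/* element overruns the buffer */
			return false;

		len -= elemlen;
		pos += 2 + elemlen;
	}
	return true;
}

int main(void)
{
	const uint8_t good[] = { 0x00, 0x03, 'f', 'o', 'o' };	/* SSID "foo" */
	const uint8_t bad[]  = { 0x00, 0x10, 'f', 'o', 'o' };	/* claims 16 bytes */

	printf("good: %d, bad: %d\n",
	       ies_are_valid(good, sizeof(good)),
	       ies_are_valid(bad, sizeof(bad)));
	return 0;
}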
@@ -126,6 +168,30 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
126 return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); 168 return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd);
127} 169}
128 170
171static int nl80211_msg_put_channel(struct sk_buff *msg,
172 struct ieee80211_channel *chan)
173{
174 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
175 chan->center_freq);
176
177 if (chan->flags & IEEE80211_CHAN_DISABLED)
178 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
179 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
180 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
181 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
182 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
183 if (chan->flags & IEEE80211_CHAN_RADAR)
184 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
185
186 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
187 DBM_TO_MBM(chan->max_power));
188
189 return 0;
190
191 nla_put_failure:
192 return -ENOBUFS;
193}
194
129/* netlink command implementations */ 195/* netlink command implementations */
130 196
131static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, 197static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
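Factoring the per-channel attributes into nl80211_msg_put_channel() leans on the NLA_PUT_* convention: when the message runs out of room the macro jumps to the local nla_put_failure label and the helper returns -ENOBUFS, so callers only check the return value. A hedged sketch of such a caller inside nl80211.c follows; the attribute ID is hypothetical and error unwinding is simplified.

/*
 * Hedged sketch of a caller inside nl80211.c: build one nested
 * frequency entry and bail out if the message buffer fills up.
 * The attribute id is hypothetical; nla_nest_start()/nla_nest_end()
 * are the standard netlink helpers.
 */
static int example_put_one_freq(struct sk_buff *msg,
				struct ieee80211_channel *chan)
{
	struct nlattr *nl_freq;

	nl_freq = nla_nest_start(msg, 1 /* hypothetical attribute id */);
	if (!nl_freq)
		return -ENOBUFS;

	if (nl80211_msg_put_channel(msg, chan))
		return -ENOBUFS;	/* message buffer is full */

	nla_nest_end(msg, nl_freq);
	return 0;
}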
@@ -149,8 +215,24 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
149 215
150 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); 216 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
151 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 217 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
218
219 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
220 dev->wiphy.retry_short);
221 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
222 dev->wiphy.retry_long);
223 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
224 dev->wiphy.frag_threshold);
225 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
226 dev->wiphy.rts_threshold);
227
152 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 228 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
153 dev->wiphy.max_scan_ssids); 229 dev->wiphy.max_scan_ssids);
230 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
231 dev->wiphy.max_scan_ie_len);
232
233 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
234 sizeof(u32) * dev->wiphy.n_cipher_suites,
235 dev->wiphy.cipher_suites);
154 236
155 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 237 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
156 if (!nl_modes) 238 if (!nl_modes)
@@ -202,20 +284,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
202 goto nla_put_failure; 284 goto nla_put_failure;
203 285
204 chan = &dev->wiphy.bands[band]->channels[i]; 286 chan = &dev->wiphy.bands[band]->channels[i];
205 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
206 chan->center_freq);
207
208 if (chan->flags & IEEE80211_CHAN_DISABLED)
209 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
210 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
211 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
212 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
213 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
214 if (chan->flags & IEEE80211_CHAN_RADAR)
215 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
216 287
217 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 288 if (nl80211_msg_put_channel(msg, chan))
218 DBM_TO_MBM(chan->max_power)); 289 goto nla_put_failure;
219 290
220 nla_nest_end(msg, nl_freq); 291 nla_nest_end(msg, nl_freq);
221 } 292 }
@@ -273,6 +344,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
273 CMD(assoc, ASSOCIATE); 344 CMD(assoc, ASSOCIATE);
274 CMD(deauth, DEAUTHENTICATE); 345 CMD(deauth, DEAUTHENTICATE);
275 CMD(disassoc, DISASSOCIATE); 346 CMD(disassoc, DISASSOCIATE);
347 CMD(join_ibss, JOIN_IBSS);
276 348
277#undef CMD 349#undef CMD
278 nla_nest_end(msg, nl_cmds); 350 nla_nest_end(msg, nl_cmds);
@@ -317,7 +389,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
317 if (IS_ERR(dev)) 389 if (IS_ERR(dev))
318 return PTR_ERR(dev); 390 return PTR_ERR(dev);
319 391
320 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 392 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
321 if (!msg) 393 if (!msg)
322 goto out_err; 394 goto out_err;
323 395
@@ -365,6 +437,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
365 struct cfg80211_registered_device *rdev; 437 struct cfg80211_registered_device *rdev;
366 int result = 0, rem_txq_params = 0; 438 int result = 0, rem_txq_params = 0;
367 struct nlattr *nl_txq_params; 439 struct nlattr *nl_txq_params;
440 u32 changed;
441 u8 retry_short = 0, retry_long = 0;
442 u32 frag_threshold = 0, rts_threshold = 0;
368 443
369 rtnl_lock(); 444 rtnl_lock();
370 445
@@ -418,7 +493,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
418 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 493 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
419 struct ieee80211_channel *chan; 494 struct ieee80211_channel *chan;
420 struct ieee80211_sta_ht_cap *ht_cap; 495 struct ieee80211_sta_ht_cap *ht_cap;
421 u32 freq, sec_freq; 496 u32 freq;
422 497
423 if (!rdev->ops->set_channel) { 498 if (!rdev->ops->set_channel) {
424 result = -EOPNOTSUPP; 499 result = -EOPNOTSUPP;
@@ -444,33 +519,28 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
444 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 519 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
445 goto bad_res; 520 goto bad_res;
446 521
447 if (channel_type == NL80211_CHAN_HT40MINUS) 522 if (channel_type == NL80211_CHAN_HT40MINUS &&
448 sec_freq = freq - 20; 523 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
449 else if (channel_type == NL80211_CHAN_HT40PLUS)
450 sec_freq = freq + 20;
451 else
452 sec_freq = 0;
453
454 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
455
456 /* no HT capabilities */
457 if (channel_type != NL80211_CHAN_NO_HT &&
458 !ht_cap->ht_supported)
459 goto bad_res; 524 goto bad_res;
525 else if (channel_type == NL80211_CHAN_HT40PLUS &&
526 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
527 goto bad_res;
528
529 /*
530	 * At this point we know that if HT40 was requested
531 * we are allowed to use it and the extension channel
532 * exists.
533 */
460 534
461 if (sec_freq) { 535 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
462 struct ieee80211_channel *schan;
463 536
464 /* no 40 MHz capabilities */ 537 /* no HT capabilities or intolerant */
538 if (channel_type != NL80211_CHAN_NO_HT) {
539 if (!ht_cap->ht_supported)
540 goto bad_res;
465 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 541 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
466 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) 542 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
467 goto bad_res; 543 goto bad_res;
468
469 schan = ieee80211_get_channel(&rdev->wiphy, sec_freq);
470
471 /* Secondary channel not allowed */
472 if (!schan || schan->flags & IEEE80211_CHAN_DISABLED)
473 goto bad_res;
474 } 544 }
475 545
476 result = rdev->ops->set_channel(&rdev->wiphy, chan, 546 result = rdev->ops->set_channel(&rdev->wiphy, chan,
@@ -479,6 +549,84 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
479 goto bad_res; 549 goto bad_res;
480 } 550 }
481 551
552 changed = 0;
553
554 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
555 retry_short = nla_get_u8(
556 info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]);
557 if (retry_short == 0) {
558 result = -EINVAL;
559 goto bad_res;
560 }
561 changed |= WIPHY_PARAM_RETRY_SHORT;
562 }
563
564 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) {
565 retry_long = nla_get_u8(
566 info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]);
567 if (retry_long == 0) {
568 result = -EINVAL;
569 goto bad_res;
570 }
571 changed |= WIPHY_PARAM_RETRY_LONG;
572 }
573
574 if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) {
575 frag_threshold = nla_get_u32(
576 info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]);
577 if (frag_threshold < 256) {
578 result = -EINVAL;
579 goto bad_res;
580 }
581 if (frag_threshold != (u32) -1) {
582 /*
583 * Fragments (apart from the last one) are required to
584 * have even length. Make the fragmentation code
585 * simpler by stripping LSB should someone try to use
586 * odd threshold value.
587 */
588 frag_threshold &= ~0x1;
589 }
590 changed |= WIPHY_PARAM_FRAG_THRESHOLD;
591 }
592
593 if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) {
594 rts_threshold = nla_get_u32(
595 info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]);
596 changed |= WIPHY_PARAM_RTS_THRESHOLD;
597 }
598
599 if (changed) {
600 u8 old_retry_short, old_retry_long;
601 u32 old_frag_threshold, old_rts_threshold;
602
603 if (!rdev->ops->set_wiphy_params) {
604 result = -EOPNOTSUPP;
605 goto bad_res;
606 }
607
608 old_retry_short = rdev->wiphy.retry_short;
609 old_retry_long = rdev->wiphy.retry_long;
610 old_frag_threshold = rdev->wiphy.frag_threshold;
611 old_rts_threshold = rdev->wiphy.rts_threshold;
612
613 if (changed & WIPHY_PARAM_RETRY_SHORT)
614 rdev->wiphy.retry_short = retry_short;
615 if (changed & WIPHY_PARAM_RETRY_LONG)
616 rdev->wiphy.retry_long = retry_long;
617 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
618 rdev->wiphy.frag_threshold = frag_threshold;
619 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
620 rdev->wiphy.rts_threshold = rts_threshold;
621
622 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
623 if (result) {
624 rdev->wiphy.retry_short = old_retry_short;
625 rdev->wiphy.retry_long = old_retry_long;
626 rdev->wiphy.frag_threshold = old_frag_threshold;
627 rdev->wiphy.rts_threshold = old_rts_threshold;
628 }
629 }
482 630
483 bad_res: 631 bad_res:
484 mutex_unlock(&rdev->mtx); 632 mutex_unlock(&rdev->mtx);
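nl80211_set_wiphy() accumulates a WIPHY_PARAM_* bitmask, remembers the old wiphy values, applies the new ones, and restores everything if the driver's set_wiphy_params() call fails. The standalone program below condenses that save/apply/rollback idiom to a single parameter; apply_to_hw() merely stands in for the driver callback.

/*
 * Standalone condensation of the save/apply/rollback idiom used in
 * nl80211_set_wiphy() above, reduced to one parameter.
 */
#include <stdio.h>

struct params { unsigned int rts_threshold; };

static int apply_to_hw(const struct params *p)
{
	/* pretend the hardware rejects the new value */
	(void)p;
	return -1;
}

static int set_rts(struct params *p, unsigned int new_rts)
{
	unsigned int old_rts = p->rts_threshold;	/* save */
	int err;

	p->rts_threshold = new_rts;			/* apply */
	err = apply_to_hw(p);
	if (err)
		p->rts_threshold = old_rts;		/* roll back */
	return err;
}

int main(void)
{
	struct params p = { .rts_threshold = 2347 };

	set_rts(&p, 500);
	printf("rts_threshold is still %u after a failed update\n",
	       p.rts_threshold);
	return 0;
}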
@@ -489,6 +637,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
489 637
490 638
491static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, 639static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
640 struct cfg80211_registered_device *rdev,
492 struct net_device *dev) 641 struct net_device *dev)
493{ 642{
494 void *hdr; 643 void *hdr;
@@ -498,6 +647,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
498 return -1; 647 return -1;
499 648
500 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 649 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
650 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
501 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 651 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
502 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); 652 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
503 return genlmsg_end(msg, hdr); 653 return genlmsg_end(msg, hdr);
@@ -532,7 +682,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
532 } 682 }
533 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, 683 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
534 cb->nlh->nlmsg_seq, NLM_F_MULTI, 684 cb->nlh->nlmsg_seq, NLM_F_MULTI,
535 wdev->netdev) < 0) { 685 dev, wdev->netdev) < 0) {
536 mutex_unlock(&dev->devlist_mtx); 686 mutex_unlock(&dev->devlist_mtx);
537 goto out; 687 goto out;
538 } 688 }
@@ -562,11 +712,12 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
562 if (err) 712 if (err)
563 return err; 713 return err;
564 714
565 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 715 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
566 if (!msg) 716 if (!msg)
567 goto out_err; 717 goto out_err;
568 718
569 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, netdev) < 0) 719 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
720 dev, netdev) < 0)
570 goto out_free; 721 goto out_free;
571 722
572 dev_put(netdev); 723 dev_put(netdev);
@@ -616,7 +767,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
616 struct cfg80211_registered_device *drv; 767 struct cfg80211_registered_device *drv;
617 struct vif_params params; 768 struct vif_params params;
618 int err, ifindex; 769 int err, ifindex;
619 enum nl80211_iftype type; 770 enum nl80211_iftype otype, ntype;
620 struct net_device *dev; 771 struct net_device *dev;
621 u32 _flags, *flags = NULL; 772 u32 _flags, *flags = NULL;
622 bool change = false; 773 bool change = false;
@@ -630,30 +781,27 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
630 goto unlock_rtnl; 781 goto unlock_rtnl;
631 782
632 ifindex = dev->ifindex; 783 ifindex = dev->ifindex;
633 type = dev->ieee80211_ptr->iftype; 784 otype = ntype = dev->ieee80211_ptr->iftype;
634 dev_put(dev); 785 dev_put(dev);
635 786
636 if (info->attrs[NL80211_ATTR_IFTYPE]) { 787 if (info->attrs[NL80211_ATTR_IFTYPE]) {
637 enum nl80211_iftype ntype;
638
639 ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); 788 ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
640 if (type != ntype) 789 if (otype != ntype)
641 change = true; 790 change = true;
642 type = ntype; 791 if (ntype > NL80211_IFTYPE_MAX) {
643 if (type > NL80211_IFTYPE_MAX) {
644 err = -EINVAL; 792 err = -EINVAL;
645 goto unlock; 793 goto unlock;
646 } 794 }
647 } 795 }
648 796
649 if (!drv->ops->change_virtual_intf || 797 if (!drv->ops->change_virtual_intf ||
650 !(drv->wiphy.interface_modes & (1 << type))) { 798 !(drv->wiphy.interface_modes & (1 << ntype))) {
651 err = -EOPNOTSUPP; 799 err = -EOPNOTSUPP;
652 goto unlock; 800 goto unlock;
653 } 801 }
654 802
655 if (info->attrs[NL80211_ATTR_MESH_ID]) { 803 if (info->attrs[NL80211_ATTR_MESH_ID]) {
656 if (type != NL80211_IFTYPE_MESH_POINT) { 804 if (ntype != NL80211_IFTYPE_MESH_POINT) {
657 err = -EINVAL; 805 err = -EINVAL;
658 goto unlock; 806 goto unlock;
659 } 807 }
@@ -663,7 +811,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
663 } 811 }
664 812
665 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { 813 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
666 if (type != NL80211_IFTYPE_MONITOR) { 814 if (ntype != NL80211_IFTYPE_MONITOR) {
667 err = -EINVAL; 815 err = -EINVAL;
668 goto unlock; 816 goto unlock;
669 } 817 }
@@ -678,12 +826,17 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
678 826
679 if (change) 827 if (change)
680 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, 828 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
681 type, flags, &params); 829 ntype, flags, &params);
682 else 830 else
683 err = 0; 831 err = 0;
684 832
685 dev = __dev_get_by_index(&init_net, ifindex); 833 dev = __dev_get_by_index(&init_net, ifindex);
686 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); 834 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != ntype));
835
836 if (dev && !err && (ntype != otype)) {
837 if (otype == NL80211_IFTYPE_ADHOC)
838 cfg80211_clear_ibss(dev, false);
839 }
687 840
688 unlock: 841 unlock:
689 cfg80211_put_dev(drv); 842 cfg80211_put_dev(drv);
@@ -832,7 +985,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
832 goto out; 985 goto out;
833 } 986 }
834 987
835 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 988 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
836 if (!msg) { 989 if (!msg) {
837 err = -ENOMEM; 990 err = -ENOMEM;
838 goto out; 991 goto out;
@@ -920,6 +1073,14 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
920 } 1073 }
921 1074
922 err = func(&drv->wiphy, dev, key_idx); 1075 err = func(&drv->wiphy, dev, key_idx);
1076#ifdef CONFIG_WIRELESS_EXT
1077 if (!err) {
1078 if (func == drv->ops->set_default_key)
1079 dev->ieee80211_ptr->wext.default_key = key_idx;
1080 else
1081 dev->ieee80211_ptr->wext.default_mgmt_key = key_idx;
1082 }
1083#endif
923 1084
924 out: 1085 out:
925 cfg80211_put_dev(drv); 1086 cfg80211_put_dev(drv);
@@ -934,7 +1095,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
934static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) 1095static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
935{ 1096{
936 struct cfg80211_registered_device *drv; 1097 struct cfg80211_registered_device *drv;
937 int err; 1098 int err, i;
938 struct net_device *dev; 1099 struct net_device *dev;
939 struct key_params params; 1100 struct key_params params;
940 u8 key_idx = 0; 1101 u8 key_idx = 0;
@@ -950,6 +1111,11 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
950 params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); 1111 params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
951 } 1112 }
952 1113
1114 if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
1115 params.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
1116 params.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
1117 }
1118
953 if (info->attrs[NL80211_ATTR_KEY_IDX]) 1119 if (info->attrs[NL80211_ATTR_KEY_IDX])
954 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); 1120 key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
955 1121
@@ -958,51 +1124,23 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
958 if (info->attrs[NL80211_ATTR_MAC]) 1124 if (info->attrs[NL80211_ATTR_MAC])
959 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 1125 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
960 1126
961 if (key_idx > 5) 1127 if (cfg80211_validate_key_settings(&params, key_idx, mac_addr))
962 return -EINVAL; 1128 return -EINVAL;
963 1129
964 /*
965 * Disallow pairwise keys with non-zero index unless it's WEP
966 * (because current deployments use pairwise WEP keys with
967 * non-zero indizes but 802.11i clearly specifies to use zero)
968 */
969 if (mac_addr && key_idx &&
970 params.cipher != WLAN_CIPHER_SUITE_WEP40 &&
971 params.cipher != WLAN_CIPHER_SUITE_WEP104)
972 return -EINVAL;
973
974 /* TODO: add definitions for the lengths to linux/ieee80211.h */
975 switch (params.cipher) {
976 case WLAN_CIPHER_SUITE_WEP40:
977 if (params.key_len != 5)
978 return -EINVAL;
979 break;
980 case WLAN_CIPHER_SUITE_TKIP:
981 if (params.key_len != 32)
982 return -EINVAL;
983 break;
984 case WLAN_CIPHER_SUITE_CCMP:
985 if (params.key_len != 16)
986 return -EINVAL;
987 break;
988 case WLAN_CIPHER_SUITE_WEP104:
989 if (params.key_len != 13)
990 return -EINVAL;
991 break;
992 case WLAN_CIPHER_SUITE_AES_CMAC:
993 if (params.key_len != 16)
994 return -EINVAL;
995 break;
996 default:
997 return -EINVAL;
998 }
999
1000 rtnl_lock(); 1130 rtnl_lock();
1001 1131
1002 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1132 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1003 if (err) 1133 if (err)
1004 goto unlock_rtnl; 1134 goto unlock_rtnl;
1005 1135
1136 for (i = 0; i < drv->wiphy.n_cipher_suites; i++)
1137 if (params.cipher == drv->wiphy.cipher_suites[i])
1138 break;
1139 if (i == drv->wiphy.n_cipher_suites) {
1140 err = -EINVAL;
1141 goto out;
1142 }
1143
1006 if (!drv->ops->add_key) { 1144 if (!drv->ops->add_key) {
1007 err = -EOPNOTSUPP; 1145 err = -EOPNOTSUPP;
1008 goto out; 1146 goto out;
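The per-cipher key-length checks and the pairwise/WEP key-index rule removed from nl80211_new_key() now live behind cfg80211_validate_key_settings(), declared in core.h earlier in this diff. Below is a hedged reconstruction of what that helper plausibly checks, pieced together only from the removed lines; the real implementation may differ.

/*
 * Hedged reconstruction of cfg80211_validate_key_settings(), based
 * solely on the checks removed from nl80211_new_key() above.
 */
#include <linux/ieee80211.h>
#include <net/cfg80211.h>

int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
				   const u8 *mac_addr)
{
	if (key_idx > 5)
		return -EINVAL;

	/*
	 * Pairwise keys must use index 0 unless the cipher is WEP, which
	 * some deployments still use with non-zero indices.
	 */
	if (mac_addr && key_idx &&
	    params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
	    params->cipher != WLAN_CIPHER_SUITE_WEP104)
		return -EINVAL;

	switch (params->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		if (params->key_len != 5)
			return -EINVAL;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		if (params->key_len != 32)
			return -EINVAL;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_AES_CMAC:
		if (params->key_len != 16)
			return -EINVAL;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		if (params->key_len != 13)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}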
@@ -1049,6 +1187,15 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
1049 1187
1050 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr); 1188 err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr);
1051 1189
1190#ifdef CONFIG_WIRELESS_EXT
1191 if (!err) {
1192 if (key_idx == dev->ieee80211_ptr->wext.default_key)
1193 dev->ieee80211_ptr->wext.default_key = -1;
1194 else if (key_idx == dev->ieee80211_ptr->wext.default_mgmt_key)
1195 dev->ieee80211_ptr->wext.default_mgmt_key = -1;
1196 }
1197#endif
1198
1052 out: 1199 out:
1053 cfg80211_put_dev(drv); 1200 cfg80211_put_dev(drv);
1054 dev_put(dev); 1201 dev_put(dev);
@@ -1069,6 +1216,9 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1069 struct beacon_parameters params; 1216 struct beacon_parameters params;
1070 int haveinfo = 0; 1217 int haveinfo = 0;
1071 1218
1219 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
1220 return -EINVAL;
1221
1072 rtnl_lock(); 1222 rtnl_lock();
1073 1223
1074 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 1224 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -1186,15 +1336,36 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
1186 [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG }, 1336 [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG },
1187 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, 1337 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
1188 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, 1338 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
1339 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
1189}; 1340};
1190 1341
1191static int parse_station_flags(struct nlattr *nla, u32 *staflags) 1342static int parse_station_flags(struct genl_info *info,
1343 struct station_parameters *params)
1192{ 1344{
1193 struct nlattr *flags[NL80211_STA_FLAG_MAX + 1]; 1345 struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
1346 struct nlattr *nla;
1194 int flag; 1347 int flag;
1195 1348
1196 *staflags = 0; 1349 /*
 1350	 * Try parsing the new attribute first so that userspace
 1351	 * can supply both attributes and still work with older kernels.
1352 */
1353 nla = info->attrs[NL80211_ATTR_STA_FLAGS2];
1354 if (nla) {
1355 struct nl80211_sta_flag_update *sta_flags;
1356
1357 sta_flags = nla_data(nla);
1358 params->sta_flags_mask = sta_flags->mask;
1359 params->sta_flags_set = sta_flags->set;
1360 if ((params->sta_flags_mask |
1361 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
1362 return -EINVAL;
1363 return 0;
1364 }
1197 1365
1366 /* if present, parse the old attribute */
1367
1368 nla = info->attrs[NL80211_ATTR_STA_FLAGS];
1198 if (!nla) 1369 if (!nla)
1199 return 0; 1370 return 0;
1200 1371
@@ -1202,11 +1373,12 @@ static int parse_station_flags(struct nlattr *nla, u32 *staflags)
1202 nla, sta_flags_policy)) 1373 nla, sta_flags_policy))
1203 return -EINVAL; 1374 return -EINVAL;
1204 1375
1205 *staflags = STATION_FLAG_CHANGED; 1376 params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
1377 params->sta_flags_mask &= ~1;
1206 1378
1207 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) 1379 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
1208 if (flags[flag]) 1380 if (flags[flag])
1209 *staflags |= (1<<flag); 1381 params->sta_flags_set |= (1<<flag);
1210 1382
1211 return 0; 1383 return 0;
1212} 1384}
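
The rewritten parse_station_flags() above prefers the new NL80211_ATTR_STA_FLAGS2 attribute, which carries an explicit mask/set pair, and only falls back to the old per-flag nesting. A minimal userspace-side sketch of filling that attribute, assuming libnl's nla_put() and a previously built NL80211_CMD_SET_STATION message (msg is a placeholder, not part of this patch):

	struct nl80211_sta_flag_update upd = {
		/* which flags this request talks about at all */
		.mask = (1 << NL80211_STA_FLAG_AUTHORIZED) |
			(1 << NL80211_STA_FLAG_WME),
		/* authorize the station, explicitly clear WME */
		.set  = (1 << NL80211_STA_FLAG_AUTHORIZED),
	};

	nla_put(msg, NL80211_ATTR_STA_FLAGS2, sizeof(upd), &upd);
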
@@ -1424,7 +1596,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
1424 if (err) 1596 if (err)
1425 goto out; 1597 goto out;
1426 1598
1427 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1599 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1428 if (!msg) 1600 if (!msg)
1429 goto out; 1601 goto out;
1430 1602
@@ -1502,8 +1674,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1502 params.ht_capa = 1674 params.ht_capa =
1503 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 1675 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1504 1676
1505 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1677 if (parse_station_flags(info, &params))
1506 &params.station_flags))
1507 return -EINVAL; 1678 return -EINVAL;
1508 1679
1509 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 1680 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
@@ -1572,8 +1743,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1572 params.ht_capa = 1743 params.ht_capa =
1573 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 1744 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1574 1745
1575 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1746 if (parse_station_flags(info, &params))
1576 &params.station_flags))
1577 return -EINVAL; 1747 return -EINVAL;
1578 1748
1579 rtnl_lock(); 1749 rtnl_lock();
@@ -1582,6 +1752,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1582 if (err) 1752 if (err)
1583 goto out_rtnl; 1753 goto out_rtnl;
1584 1754
1755 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1756 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1757 err = -EINVAL;
1758 goto out;
1759 }
1760
1585 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1761 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1586 if (err) 1762 if (err)
1587 goto out; 1763 goto out;
@@ -1625,6 +1801,12 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
1625 if (err) 1801 if (err)
1626 goto out_rtnl; 1802 goto out_rtnl;
1627 1803
1804 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1805 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1806 err = -EINVAL;
1807 goto out;
1808 }
1809
1628 if (!drv->ops->del_station) { 1810 if (!drv->ops->del_station) {
1629 err = -EOPNOTSUPP; 1811 err = -EOPNOTSUPP;
1630 goto out; 1812 goto out;
@@ -1808,7 +1990,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
1808 if (err) 1990 if (err)
1809 goto out; 1991 goto out;
1810 1992
1811 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1993 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1812 if (!msg) 1994 if (!msg)
1813 goto out; 1995 goto out;
1814 1996
@@ -2124,7 +2306,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
2124 goto out; 2306 goto out;
2125 2307
2126 /* Draw up a netlink message to send back */ 2308 /* Draw up a netlink message to send back */
2127 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2309 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2128 if (!msg) { 2310 if (!msg) {
2129 err = -ENOBUFS; 2311 err = -ENOBUFS;
2130 goto out; 2312 goto out;
@@ -2302,7 +2484,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
2302 if (!cfg80211_regdomain) 2484 if (!cfg80211_regdomain)
2303 goto out; 2485 goto out;
2304 2486
2305 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2487 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2306 if (!msg) { 2488 if (!msg) {
2307 err = -ENOBUFS; 2489 err = -ENOBUFS;
2308 goto out; 2490 goto out;
@@ -2385,18 +2567,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2385 rem_reg_rules) { 2567 rem_reg_rules) {
2386 num_rules++; 2568 num_rules++;
2387 if (num_rules > NL80211_MAX_SUPP_REG_RULES) 2569 if (num_rules > NL80211_MAX_SUPP_REG_RULES)
2388 goto bad_reg; 2570 return -EINVAL;
2389 } 2571 }
2390 2572
2391 if (!reg_is_valid_request(alpha2)) 2573 mutex_lock(&cfg80211_mutex);
2392 return -EINVAL; 2574
2575 if (!reg_is_valid_request(alpha2)) {
2576 r = -EINVAL;
2577 goto bad_reg;
2578 }
2393 2579
2394 size_of_regd = sizeof(struct ieee80211_regdomain) + 2580 size_of_regd = sizeof(struct ieee80211_regdomain) +
2395 (num_rules * sizeof(struct ieee80211_reg_rule)); 2581 (num_rules * sizeof(struct ieee80211_reg_rule));
2396 2582
2397 rd = kzalloc(size_of_regd, GFP_KERNEL); 2583 rd = kzalloc(size_of_regd, GFP_KERNEL);
2398 if (!rd) 2584 if (!rd) {
2399 return -ENOMEM; 2585 r = -ENOMEM;
2586 goto bad_reg;
2587 }
2400 2588
2401 rd->n_reg_rules = num_rules; 2589 rd->n_reg_rules = num_rules;
2402 rd->alpha2[0] = alpha2[0]; 2590 rd->alpha2[0] = alpha2[0];
@@ -2413,20 +2601,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2413 2601
2414 rule_idx++; 2602 rule_idx++;
2415 2603
2416 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) 2604 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) {
2605 r = -EINVAL;
2417 goto bad_reg; 2606 goto bad_reg;
2607 }
2418 } 2608 }
2419 2609
2420 BUG_ON(rule_idx != num_rules); 2610 BUG_ON(rule_idx != num_rules);
2421 2611
2422 mutex_lock(&cfg80211_mutex);
2423 r = set_regdom(rd); 2612 r = set_regdom(rd);
2613
2424 mutex_unlock(&cfg80211_mutex); 2614 mutex_unlock(&cfg80211_mutex);
2615
2425 return r; 2616 return r;
2426 2617
2427 bad_reg: 2618 bad_reg:
2619 mutex_unlock(&cfg80211_mutex);
2428 kfree(rd); 2620 kfree(rd);
2429 return -EINVAL; 2621 return r;
2430} 2622}
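
The nl80211_set_reg() hunk above takes cfg80211_mutex before the reg_is_valid_request() check, so every later failure has to leave through bad_reg, which now drops the lock and returns the recorded error instead of a hard-coded -EINVAL. Condensed, the resulting control flow looks like this (rd is assumed to start out NULL, as in the unchanged part of the function, so the kfree() in the error path is safe):

	mutex_lock(&cfg80211_mutex);

	if (!reg_is_valid_request(alpha2)) {
		r = -EINVAL;
		goto bad_reg;
	}

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd) {
		r = -ENOMEM;
		goto bad_reg;
	}

	/* ... fill in the rules; a rule-count overrun also jumps to bad_reg ... */

	r = set_regdom(rd);
	mutex_unlock(&cfg80211_mutex);
	return r;

 bad_reg:
	mutex_unlock(&cfg80211_mutex);
	kfree(rd);		/* kfree(NULL) is a no-op */
	return r;
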
2431 2623
2432static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) 2624static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
@@ -2442,6 +2634,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2442 enum ieee80211_band band; 2634 enum ieee80211_band band;
2443 size_t ie_len; 2635 size_t ie_len;
2444 2636
2637 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
2638 return -EINVAL;
2639
2445 rtnl_lock(); 2640 rtnl_lock();
2446 2641
2447 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2642 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2492,6 +2687,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2492 else 2687 else
2493 ie_len = 0; 2688 ie_len = 0;
2494 2689
2690 if (ie_len > wiphy->max_scan_ie_len) {
2691 err = -EINVAL;
2692 goto out;
2693 }
2694
2495 request = kzalloc(sizeof(*request) 2695 request = kzalloc(sizeof(*request)
2496 + sizeof(*ssid) * n_ssids 2696 + sizeof(*ssid) * n_ssids
2497 + sizeof(channel) * n_channels 2697 + sizeof(channel) * n_channels
@@ -2554,7 +2754,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
2554 2754
2555 if (info->attrs[NL80211_ATTR_IE]) { 2755 if (info->attrs[NL80211_ATTR_IE]) {
2556 request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2756 request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2557 memcpy(request->ie, nla_data(info->attrs[NL80211_ATTR_IE]), 2757 memcpy((void *)request->ie,
2758 nla_data(info->attrs[NL80211_ATTR_IE]),
2558 request->ie_len); 2759 request->ie_len);
2559 } 2760 }
2560 2761
@@ -2710,6 +2911,15 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2710 struct wiphy *wiphy; 2911 struct wiphy *wiphy;
2711 int err; 2912 int err;
2712 2913
2914 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
2915 return -EINVAL;
2916
2917 if (!info->attrs[NL80211_ATTR_MAC])
2918 return -EINVAL;
2919
2920 if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
2921 return -EINVAL;
2922
2713 rtnl_lock(); 2923 rtnl_lock();
2714 2924
2715 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 2925 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2731,11 +2941,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2731 goto out; 2941 goto out;
2732 } 2942 }
2733 2943
2734 if (!info->attrs[NL80211_ATTR_MAC]) {
2735 err = -EINVAL;
2736 goto out;
2737 }
2738
2739 wiphy = &drv->wiphy; 2944 wiphy = &drv->wiphy;
2740 memset(&req, 0, sizeof(req)); 2945 memset(&req, 0, sizeof(req));
2741 2946
@@ -2761,13 +2966,10 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
2761 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2966 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2762 } 2967 }
2763 2968
2764 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { 2969 req.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
2765 req.auth_type = 2970 if (!nl80211_valid_auth_type(req.auth_type)) {
2766 nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); 2971 err = -EINVAL;
2767 if (!nl80211_valid_auth_type(req.auth_type)) { 2972 goto out;
2768 err = -EINVAL;
2769 goto out;
2770 }
2771 } 2973 }
2772 2974
2773 err = drv->ops->auth(&drv->wiphy, dev, &req); 2975 err = drv->ops->auth(&drv->wiphy, dev, &req);
@@ -2788,6 +2990,13 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2788 struct wiphy *wiphy; 2990 struct wiphy *wiphy;
2789 int err; 2991 int err;
2790 2992
2993 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
2994 return -EINVAL;
2995
2996 if (!info->attrs[NL80211_ATTR_MAC] ||
2997 !info->attrs[NL80211_ATTR_SSID])
2998 return -EINVAL;
2999
2791 rtnl_lock(); 3000 rtnl_lock();
2792 3001
2793 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3002 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2809,12 +3018,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2809 goto out; 3018 goto out;
2810 } 3019 }
2811 3020
2812 if (!info->attrs[NL80211_ATTR_MAC] ||
2813 !info->attrs[NL80211_ATTR_SSID]) {
2814 err = -EINVAL;
2815 goto out;
2816 }
2817
2818 wiphy = &drv->wiphy; 3021 wiphy = &drv->wiphy;
2819 memset(&req, 0, sizeof(req)); 3022 memset(&req, 0, sizeof(req));
2820 3023
@@ -2838,6 +3041,19 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
2838 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 3041 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2839 } 3042 }
2840 3043
3044 if (info->attrs[NL80211_ATTR_USE_MFP]) {
3045 enum nl80211_mfp use_mfp =
3046 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
3047 if (use_mfp == NL80211_MFP_REQUIRED)
3048 req.use_mfp = true;
3049 else if (use_mfp != NL80211_MFP_NO) {
3050 err = -EINVAL;
3051 goto out;
3052 }
3053 }
3054
3055 req.control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
3056
2841 err = drv->ops->assoc(&drv->wiphy, dev, &req); 3057 err = drv->ops->assoc(&drv->wiphy, dev, &req);
2842 3058
2843out: 3059out:
@@ -2856,6 +3072,15 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
2856 struct wiphy *wiphy; 3072 struct wiphy *wiphy;
2857 int err; 3073 int err;
2858 3074
3075 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3076 return -EINVAL;
3077
3078 if (!info->attrs[NL80211_ATTR_MAC])
3079 return -EINVAL;
3080
3081 if (!info->attrs[NL80211_ATTR_REASON_CODE])
3082 return -EINVAL;
3083
2859 rtnl_lock(); 3084 rtnl_lock();
2860 3085
2861 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3086 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2877,24 +3102,16 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
2877 goto out; 3102 goto out;
2878 } 3103 }
2879 3104
2880 if (!info->attrs[NL80211_ATTR_MAC]) {
2881 err = -EINVAL;
2882 goto out;
2883 }
2884
2885 wiphy = &drv->wiphy; 3105 wiphy = &drv->wiphy;
2886 memset(&req, 0, sizeof(req)); 3106 memset(&req, 0, sizeof(req));
2887 3107
2888 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 3108 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2889 3109
2890 if (info->attrs[NL80211_ATTR_REASON_CODE]) { 3110 req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2891 req.reason_code = 3111 if (req.reason_code == 0) {
2892 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); 3112 /* Reason Code 0 is reserved */
2893 if (req.reason_code == 0) { 3113 err = -EINVAL;
2894 /* Reason Code 0 is reserved */ 3114 goto out;
2895 err = -EINVAL;
2896 goto out;
2897 }
2898 } 3115 }
2899 3116
2900 if (info->attrs[NL80211_ATTR_IE]) { 3117 if (info->attrs[NL80211_ATTR_IE]) {
@@ -2920,6 +3137,15 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
2920 struct wiphy *wiphy; 3137 struct wiphy *wiphy;
2921 int err; 3138 int err;
2922 3139
3140 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3141 return -EINVAL;
3142
3143 if (!info->attrs[NL80211_ATTR_MAC])
3144 return -EINVAL;
3145
3146 if (!info->attrs[NL80211_ATTR_REASON_CODE])
3147 return -EINVAL;
3148
2923 rtnl_lock(); 3149 rtnl_lock();
2924 3150
2925 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 3151 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
@@ -2941,24 +3167,16 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
2941 goto out; 3167 goto out;
2942 } 3168 }
2943 3169
2944 if (!info->attrs[NL80211_ATTR_MAC]) {
2945 err = -EINVAL;
2946 goto out;
2947 }
2948
2949 wiphy = &drv->wiphy; 3170 wiphy = &drv->wiphy;
2950 memset(&req, 0, sizeof(req)); 3171 memset(&req, 0, sizeof(req));
2951 3172
2952 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 3173 req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2953 3174
2954 if (info->attrs[NL80211_ATTR_REASON_CODE]) { 3175 req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
2955 req.reason_code = 3176 if (req.reason_code == 0) {
2956 nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); 3177 /* Reason Code 0 is reserved */
2957 if (req.reason_code == 0) { 3178 err = -EINVAL;
2958 /* Reason Code 0 is reserved */ 3179 goto out;
2959 err = -EINVAL;
2960 goto out;
2961 }
2962 } 3180 }
2963 3181
2964 if (info->attrs[NL80211_ATTR_IE]) { 3182 if (info->attrs[NL80211_ATTR_IE]) {
@@ -2976,6 +3194,124 @@ unlock_rtnl:
2976 return err; 3194 return err;
2977} 3195}
2978 3196
3197static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
3198{
3199 struct cfg80211_registered_device *drv;
3200 struct net_device *dev;
3201 struct cfg80211_ibss_params ibss;
3202 struct wiphy *wiphy;
3203 int err;
3204
3205 memset(&ibss, 0, sizeof(ibss));
3206
3207 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3208 return -EINVAL;
3209
3210 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
3211 !info->attrs[NL80211_ATTR_SSID] ||
3212 !nla_len(info->attrs[NL80211_ATTR_SSID]))
3213 return -EINVAL;
3214
3215 ibss.beacon_interval = 100;
3216
3217 if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
3218 ibss.beacon_interval =
3219 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
3220 if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
3221 return -EINVAL;
3222 }
3223
3224 rtnl_lock();
3225
3226 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
3227 if (err)
3228 goto unlock_rtnl;
3229
3230 if (!drv->ops->join_ibss) {
3231 err = -EOPNOTSUPP;
3232 goto out;
3233 }
3234
3235 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
3236 err = -EOPNOTSUPP;
3237 goto out;
3238 }
3239
3240 if (!netif_running(dev)) {
3241 err = -ENETDOWN;
3242 goto out;
3243 }
3244
3245 wiphy = &drv->wiphy;
3246
3247 if (info->attrs[NL80211_ATTR_MAC])
3248 ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
3249 ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
3250 ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
3251
3252 if (info->attrs[NL80211_ATTR_IE]) {
3253 ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
3254 ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3255 }
3256
3257 ibss.channel = ieee80211_get_channel(wiphy,
3258 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
3259 if (!ibss.channel ||
3260 ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
3261 ibss.channel->flags & IEEE80211_CHAN_DISABLED) {
3262 err = -EINVAL;
3263 goto out;
3264 }
3265
3266 ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
3267
3268 err = cfg80211_join_ibss(drv, dev, &ibss);
3269
3270out:
3271 cfg80211_put_dev(drv);
3272 dev_put(dev);
3273unlock_rtnl:
3274 rtnl_unlock();
3275 return err;
3276}
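
nl80211_join_ibss() above defaults the beacon interval to 100 TU, accepts values from 1 to 10000, and refuses channels flagged NO_IBSS or DISABLED. A hedged userspace sketch of issuing the command with libnl-3's genl helpers, error handling omitted (sk and family are assumed to have been set up with genl_connect() and genl_ctrl_resolve(sk, "nl80211"); ifindex, the SSID and 2412 MHz are illustrative values):

	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_JOIN_IBSS, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, strlen("test-ibss"), "test-ibss");
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);
	nla_put_flag(msg, NL80211_ATTR_FREQ_FIXED);	/* optional: pin the channel */

	nl_send_auto_complete(sk, msg);
	nlmsg_free(msg);
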
3277
3278static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
3279{
3280 struct cfg80211_registered_device *drv;
3281 struct net_device *dev;
3282 int err;
3283
3284 rtnl_lock();
3285
3286 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
3287 if (err)
3288 goto unlock_rtnl;
3289
3290 if (!drv->ops->leave_ibss) {
3291 err = -EOPNOTSUPP;
3292 goto out;
3293 }
3294
3295 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
3296 err = -EOPNOTSUPP;
3297 goto out;
3298 }
3299
3300 if (!netif_running(dev)) {
3301 err = -ENETDOWN;
3302 goto out;
3303 }
3304
3305 err = cfg80211_leave_ibss(drv, dev, false);
3306
3307out:
3308 cfg80211_put_dev(drv);
3309 dev_put(dev);
3310unlock_rtnl:
3311 rtnl_unlock();
3312 return err;
3313}
3314
2979static struct genl_ops nl80211_ops[] = { 3315static struct genl_ops nl80211_ops[] = {
2980 { 3316 {
2981 .cmd = NL80211_CMD_GET_WIPHY, 3317 .cmd = NL80211_CMD_GET_WIPHY,
@@ -3177,6 +3513,18 @@ static struct genl_ops nl80211_ops[] = {
3177 .policy = nl80211_policy, 3513 .policy = nl80211_policy,
3178 .flags = GENL_ADMIN_PERM, 3514 .flags = GENL_ADMIN_PERM,
3179 }, 3515 },
3516 {
3517 .cmd = NL80211_CMD_JOIN_IBSS,
3518 .doit = nl80211_join_ibss,
3519 .policy = nl80211_policy,
3520 .flags = GENL_ADMIN_PERM,
3521 },
3522 {
3523 .cmd = NL80211_CMD_LEAVE_IBSS,
3524 .doit = nl80211_leave_ibss,
3525 .policy = nl80211_policy,
3526 .flags = GENL_ADMIN_PERM,
3527 },
3180}; 3528};
3181static struct genl_multicast_group nl80211_mlme_mcgrp = { 3529static struct genl_multicast_group nl80211_mlme_mcgrp = {
3182 .name = "mlme", 3530 .name = "mlme",
@@ -3199,7 +3547,7 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
3199{ 3547{
3200 struct sk_buff *msg; 3548 struct sk_buff *msg;
3201 3549
3202 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3550 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3203 if (!msg) 3551 if (!msg)
3204 return; 3552 return;
3205 3553
@@ -3240,7 +3588,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
3240{ 3588{
3241 struct sk_buff *msg; 3589 struct sk_buff *msg;
3242 3590
3243 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3591 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3244 if (!msg) 3592 if (!msg)
3245 return; 3593 return;
3246 3594
@@ -3258,7 +3606,7 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
3258{ 3606{
3259 struct sk_buff *msg; 3607 struct sk_buff *msg;
3260 3608
3261 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3609 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3262 if (!msg) 3610 if (!msg)
3263 return; 3611 return;
3264 3612
@@ -3280,7 +3628,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
3280 struct sk_buff *msg; 3628 struct sk_buff *msg;
3281 void *hdr; 3629 void *hdr;
3282 3630
3283 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3631 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3284 if (!msg) 3632 if (!msg)
3285 return; 3633 return;
3286 3634
@@ -3334,7 +3682,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3334 struct sk_buff *msg; 3682 struct sk_buff *msg;
3335 void *hdr; 3683 void *hdr;
3336 3684
3337 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 3685 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3338 if (!msg) 3686 if (!msg)
3339 return; 3687 return;
3340 3688
@@ -3375,38 +3723,208 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
3375 nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE); 3723 nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE);
3376} 3724}
3377 3725
3378void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, 3726void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
3379 struct net_device *netdev, const u8 *buf, 3727 struct net_device *netdev, const u8 *buf, size_t len)
3380 size_t len)
3381{ 3728{
3382 nl80211_send_mlme_event(rdev, netdev, buf, len, 3729 nl80211_send_mlme_event(rdev, netdev, buf, len,
3383 NL80211_CMD_DEAUTHENTICATE); 3730 NL80211_CMD_DEAUTHENTICATE);
3384} 3731}
3385 3732
3386void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, 3733void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
3387 struct net_device *netdev, const u8 *buf, 3734 struct net_device *netdev, const u8 *buf,
3388 size_t len) 3735 size_t len)
3389{ 3736{
3390 nl80211_send_mlme_event(rdev, netdev, buf, len, 3737 nl80211_send_mlme_event(rdev, netdev, buf, len,
3391 NL80211_CMD_DISASSOCIATE); 3738 NL80211_CMD_DISASSOCIATE);
3392} 3739}
3393 3740
3741static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
3742 struct net_device *netdev, int cmd,
3743 const u8 *addr)
3744{
3745 struct sk_buff *msg;
3746 void *hdr;
3747
3748 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3749 if (!msg)
3750 return;
3751
3752 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
3753 if (!hdr) {
3754 nlmsg_free(msg);
3755 return;
3756 }
3757
3758 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3759 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3760 NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
3761 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
3762
3763 if (genlmsg_end(msg, hdr) < 0) {
3764 nlmsg_free(msg);
3765 return;
3766 }
3767
3768 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
3769 return;
3770
3771 nla_put_failure:
3772 genlmsg_cancel(msg, hdr);
3773 nlmsg_free(msg);
3774}
3775
3776void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
3777 struct net_device *netdev, const u8 *addr)
3778{
3779 nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE,
3780 addr);
3781}
3782
3783void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
3784 struct net_device *netdev, const u8 *addr)
3785{
3786 nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr);
3787}
3788
3789void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
3790 struct net_device *netdev, const u8 *bssid,
3791 gfp_t gfp)
3792{
3793 struct sk_buff *msg;
3794 void *hdr;
3795
3796 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3797 if (!msg)
3798 return;
3799
3800 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS);
3801 if (!hdr) {
3802 nlmsg_free(msg);
3803 return;
3804 }
3805
3806 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3807 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3808 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
3809
3810 if (genlmsg_end(msg, hdr) < 0) {
3811 nlmsg_free(msg);
3812 return;
3813 }
3814
3815 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
3816 return;
3817
3818 nla_put_failure:
3819 genlmsg_cancel(msg, hdr);
3820 nlmsg_free(msg);
3821}
3822
3823void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
3824 struct net_device *netdev, const u8 *addr,
3825 enum nl80211_key_type key_type, int key_id,
3826 const u8 *tsc)
3827{
3828 struct sk_buff *msg;
3829 void *hdr;
3830
3831 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3832 if (!msg)
3833 return;
3834
3835 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE);
3836 if (!hdr) {
3837 nlmsg_free(msg);
3838 return;
3839 }
3840
3841 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3842 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3843 if (addr)
3844 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
3845 NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
3846 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
3847 if (tsc)
3848 NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
3849
3850 if (genlmsg_end(msg, hdr) < 0) {
3851 nlmsg_free(msg);
3852 return;
3853 }
3854
3855 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
3856 return;
3857
3858 nla_put_failure:
3859 genlmsg_cancel(msg, hdr);
3860 nlmsg_free(msg);
3861}
3862
3863void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
3864 struct ieee80211_channel *channel_before,
3865 struct ieee80211_channel *channel_after)
3866{
3867 struct sk_buff *msg;
3868 void *hdr;
3869 struct nlattr *nl_freq;
3870
3871 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3872 if (!msg)
3873 return;
3874
3875 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT);
3876 if (!hdr) {
3877 nlmsg_free(msg);
3878 return;
3879 }
3880
3881 /*
3882 * Since we are applying the beacon hint to a wiphy we know its
3883 * wiphy_idx is valid
3884 */
3885 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
3886
3887 /* Before */
3888 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
3889 if (!nl_freq)
3890 goto nla_put_failure;
3891 if (nl80211_msg_put_channel(msg, channel_before))
3892 goto nla_put_failure;
3893 nla_nest_end(msg, nl_freq);
3894
3895 /* After */
3896 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
3897 if (!nl_freq)
3898 goto nla_put_failure;
3899 if (nl80211_msg_put_channel(msg, channel_after))
3900 goto nla_put_failure;
3901 nla_nest_end(msg, nl_freq);
3902
3903 if (genlmsg_end(msg, hdr) < 0) {
3904 nlmsg_free(msg);
3905 return;
3906 }
3907
3908 genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC);
3909
3910 return;
3911
3912nla_put_failure:
3913 genlmsg_cancel(msg, hdr);
3914 nlmsg_free(msg);
3915}
3916
3394/* initialisation/exit functions */ 3917/* initialisation/exit functions */
3395 3918
3396int nl80211_init(void) 3919int nl80211_init(void)
3397{ 3920{
3398 int err, i; 3921 int err;
3399 3922
3400 err = genl_register_family(&nl80211_fam); 3923 err = genl_register_family_with_ops(&nl80211_fam,
3924 nl80211_ops, ARRAY_SIZE(nl80211_ops));
3401 if (err) 3925 if (err)
3402 return err; 3926 return err;
3403 3927
3404 for (i = 0; i < ARRAY_SIZE(nl80211_ops); i++) {
3405 err = genl_register_ops(&nl80211_fam, &nl80211_ops[i]);
3406 if (err)
3407 goto err_out;
3408 }
3409
3410 err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); 3928 err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp);
3411 if (err) 3929 if (err)
3412 goto err_out; 3930 goto err_out;
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b77af4ab80be..5c12ad13499b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -17,11 +17,31 @@ extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
17extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, 17extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
18 struct net_device *netdev, 18 struct net_device *netdev,
19 const u8 *buf, size_t len); 19 const u8 *buf, size_t len);
20extern void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, 20extern void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
21 struct net_device *netdev, 21 struct net_device *netdev,
22 const u8 *buf, size_t len); 22 const u8 *buf, size_t len);
23extern void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, 23extern void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
24 struct net_device *netdev, 24 struct net_device *netdev,
25 const u8 *buf, size_t len); 25 const u8 *buf, size_t len);
26extern void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
27 struct net_device *netdev,
28 const u8 *addr);
29extern void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
30 struct net_device *netdev,
31 const u8 *addr);
32extern void
33nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
34 struct net_device *netdev, const u8 *addr,
35 enum nl80211_key_type key_type,
36 int key_id, const u8 *tsc);
37
38extern void
39nl80211_send_beacon_hint_event(struct wiphy *wiphy,
40 struct ieee80211_channel *channel_before,
41 struct ieee80211_channel *channel_after);
42
43void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
44 struct net_device *netdev, const u8 *bssid,
45 gfp_t gfp);
26 46
27#endif /* __NET_WIRELESS_NL80211_H */ 47#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 487cb627ddba..f87ac1df2df5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -37,7 +37,6 @@
37#include <linux/random.h> 37#include <linux/random.h>
38#include <linux/nl80211.h> 38#include <linux/nl80211.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <net/wireless.h>
41#include <net/cfg80211.h> 40#include <net/cfg80211.h>
42#include "core.h" 41#include "core.h"
43#include "reg.h" 42#include "reg.h"
@@ -49,12 +48,6 @@ static struct regulatory_request *last_request;
49/* To trigger userspace events */ 48/* To trigger userspace events */
50static struct platform_device *reg_pdev; 49static struct platform_device *reg_pdev;
51 50
52/* Keep the ordering from large to small */
53static u32 supported_bandwidths[] = {
54 MHZ_TO_KHZ(40),
55 MHZ_TO_KHZ(20),
56};
57
58/* 51/*
59 * Central wireless core regulatory domains, we only need two, 52 * Central wireless core regulatory domains, we only need two,
60 * the current one and a world regulatory domain in case we have no 53 * the current one and a world regulatory domain in case we have no
@@ -389,6 +382,8 @@ static int call_crda(const char *alpha2)
389/* Used by nl80211 before kmalloc'ing our regulatory domain */ 382/* Used by nl80211 before kmalloc'ing our regulatory domain */
390bool reg_is_valid_request(const char *alpha2) 383bool reg_is_valid_request(const char *alpha2)
391{ 384{
385 assert_cfg80211_lock();
386
392 if (!last_request) 387 if (!last_request)
393 return false; 388 return false;
394 389
@@ -436,19 +431,20 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
436 return true; 431 return true;
437} 432}
438 433
439/* Returns value in KHz */ 434static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
440static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range, 435 u32 center_freq_khz,
441 u32 freq) 436 u32 bw_khz)
442{ 437{
443 unsigned int i; 438 u32 start_freq_khz, end_freq_khz;
444 for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) { 439
445 u32 start_freq_khz = freq - supported_bandwidths[i]/2; 440 start_freq_khz = center_freq_khz - (bw_khz/2);
446 u32 end_freq_khz = freq + supported_bandwidths[i]/2; 441 end_freq_khz = center_freq_khz + (bw_khz/2);
447 if (start_freq_khz >= freq_range->start_freq_khz && 442
448 end_freq_khz <= freq_range->end_freq_khz) 443 if (start_freq_khz >= freq_range->start_freq_khz &&
449 return supported_bandwidths[i]; 444 end_freq_khz <= freq_range->end_freq_khz)
450 } 445 return true;
451 return 0; 446
447 return false;
452} 448}
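
reg_does_bw_fit() replaces the old fixed supported_bandwidths[] walk with a plain containment check: the requested bandwidth fits if the whole span around the centre frequency lies inside the rule's frequency range. As an illustration (the 2400-2483.5 MHz rule is an assumed example, not taken from this patch):

	/* fr covers 2400000..2483500 kHz */
	reg_does_bw_fit(&fr, MHZ_TO_KHZ(2412), MHZ_TO_KHZ(20));
		/* true:  2402..2422 MHz lies inside the rule */
	reg_does_bw_fit(&fr, MHZ_TO_KHZ(2412), MHZ_TO_KHZ(40));
		/* false: 2392 MHz falls below the 2400 MHz start */
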
453 449
454/** 450/**
@@ -848,14 +844,17 @@ static u32 map_regdom_flags(u32 rd_flags)
848 844
849static int freq_reg_info_regd(struct wiphy *wiphy, 845static int freq_reg_info_regd(struct wiphy *wiphy,
850 u32 center_freq, 846 u32 center_freq,
851 u32 *bandwidth, 847 u32 desired_bw_khz,
852 const struct ieee80211_reg_rule **reg_rule, 848 const struct ieee80211_reg_rule **reg_rule,
853 const struct ieee80211_regdomain *custom_regd) 849 const struct ieee80211_regdomain *custom_regd)
854{ 850{
855 int i; 851 int i;
856 bool band_rule_found = false; 852 bool band_rule_found = false;
857 const struct ieee80211_regdomain *regd; 853 const struct ieee80211_regdomain *regd;
858 u32 max_bandwidth = 0; 854 bool bw_fits = false;
855
856 if (!desired_bw_khz)
857 desired_bw_khz = MHZ_TO_KHZ(20);
859 858
860 regd = custom_regd ? custom_regd : cfg80211_regdomain; 859 regd = custom_regd ? custom_regd : cfg80211_regdomain;
861 860
@@ -888,38 +887,54 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
888 if (!band_rule_found) 887 if (!band_rule_found)
889 band_rule_found = freq_in_rule_band(fr, center_freq); 888 band_rule_found = freq_in_rule_band(fr, center_freq);
890 889
891 max_bandwidth = freq_max_bandwidth(fr, center_freq); 890 bw_fits = reg_does_bw_fit(fr,
891 center_freq,
892 desired_bw_khz);
892 893
893 if (max_bandwidth && *bandwidth <= max_bandwidth) { 894 if (band_rule_found && bw_fits) {
894 *reg_rule = rr; 895 *reg_rule = rr;
895 *bandwidth = max_bandwidth; 896 return 0;
896 break;
897 } 897 }
898 } 898 }
899 899
900 if (!band_rule_found) 900 if (!band_rule_found)
901 return -ERANGE; 901 return -ERANGE;
902 902
903 return !max_bandwidth; 903 return -EINVAL;
904} 904}
905EXPORT_SYMBOL(freq_reg_info); 905EXPORT_SYMBOL(freq_reg_info);
906 906
907int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, 907int freq_reg_info(struct wiphy *wiphy,
908 const struct ieee80211_reg_rule **reg_rule) 908 u32 center_freq,
909 u32 desired_bw_khz,
910 const struct ieee80211_reg_rule **reg_rule)
909{ 911{
910 assert_cfg80211_lock(); 912 assert_cfg80211_lock();
911 return freq_reg_info_regd(wiphy, center_freq, 913 return freq_reg_info_regd(wiphy,
912 bandwidth, reg_rule, NULL); 914 center_freq,
915 desired_bw_khz,
916 reg_rule,
917 NULL);
913} 918}
914 919
920/*
921 * Note that right now we assume the desired channel bandwidth
922 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
923 * per channel, the primary and the extension channel). To support
924 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 925 * new ieee80211_channel.target_bw and re-run the regulatory check
 926 * on the wiphy with the target_bw specified. Then we can simply use
 927 * that for the desired_bw_khz below.
928 */
915static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 929static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
916 unsigned int chan_idx) 930 unsigned int chan_idx)
917{ 931{
918 int r; 932 int r;
919 u32 flags; 933 u32 flags, bw_flags = 0;
920 u32 max_bandwidth = 0; 934 u32 desired_bw_khz = MHZ_TO_KHZ(20);
921 const struct ieee80211_reg_rule *reg_rule = NULL; 935 const struct ieee80211_reg_rule *reg_rule = NULL;
922 const struct ieee80211_power_rule *power_rule = NULL; 936 const struct ieee80211_power_rule *power_rule = NULL;
937 const struct ieee80211_freq_range *freq_range = NULL;
923 struct ieee80211_supported_band *sband; 938 struct ieee80211_supported_band *sband;
924 struct ieee80211_channel *chan; 939 struct ieee80211_channel *chan;
925 struct wiphy *request_wiphy = NULL; 940 struct wiphy *request_wiphy = NULL;
@@ -934,8 +949,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
934 949
935 flags = chan->orig_flags; 950 flags = chan->orig_flags;
936 951
937 r = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq), 952 r = freq_reg_info(wiphy,
938 &max_bandwidth, &reg_rule); 953 MHZ_TO_KHZ(chan->center_freq),
954 desired_bw_khz,
955 &reg_rule);
939 956
940 if (r) { 957 if (r) {
941 /* 958 /*
@@ -978,6 +995,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
978 } 995 }
979 996
980 power_rule = &reg_rule->power_rule; 997 power_rule = &reg_rule->power_rule;
998 freq_range = &reg_rule->freq_range;
999
1000 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1001 bw_flags = IEEE80211_CHAN_NO_HT40;
981 1002
982 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1003 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
983 request_wiphy && request_wiphy == wiphy && 1004 request_wiphy && request_wiphy == wiphy &&
@@ -988,19 +1009,19 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
988 * settings 1009 * settings
989 */ 1010 */
990 chan->flags = chan->orig_flags = 1011 chan->flags = chan->orig_flags =
991 map_regdom_flags(reg_rule->flags); 1012 map_regdom_flags(reg_rule->flags) | bw_flags;
992 chan->max_antenna_gain = chan->orig_mag = 1013 chan->max_antenna_gain = chan->orig_mag =
993 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1014 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
994 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1015 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
995 chan->max_power = chan->orig_mpwr = 1016 chan->max_power = chan->orig_mpwr =
996 (int) MBM_TO_DBM(power_rule->max_eirp); 1017 (int) MBM_TO_DBM(power_rule->max_eirp);
997 return; 1018 return;
998 } 1019 }
999 1020
1000 chan->flags = flags | map_regdom_flags(reg_rule->flags); 1021 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
1001 chan->max_antenna_gain = min(chan->orig_mag, 1022 chan->max_antenna_gain = min(chan->orig_mag,
1002 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 1023 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
1003 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1024 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1004 if (chan->orig_mpwr) 1025 if (chan->orig_mpwr)
1005 chan->max_power = min(chan->orig_mpwr, 1026 chan->max_power = min(chan->orig_mpwr,
1006 (int) MBM_TO_DBM(power_rule->max_eirp)); 1027 (int) MBM_TO_DBM(power_rule->max_eirp));
@@ -1050,18 +1071,10 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1050 unsigned int chan_idx, 1071 unsigned int chan_idx,
1051 struct reg_beacon *reg_beacon) 1072 struct reg_beacon *reg_beacon)
1052{ 1073{
1053#ifdef CONFIG_CFG80211_REG_DEBUG
1054#define REG_DEBUG_BEACON_FLAG(desc) \
1055 printk(KERN_DEBUG "cfg80211: Enabling " desc " on " \
1056 "frequency: %d MHz (Ch %d) on %s\n", \
1057 reg_beacon->chan.center_freq, \
1058 ieee80211_frequency_to_channel(reg_beacon->chan.center_freq), \
1059 wiphy_name(wiphy));
1060#else
1061#define REG_DEBUG_BEACON_FLAG(desc) do {} while (0)
1062#endif
1063 struct ieee80211_supported_band *sband; 1074 struct ieee80211_supported_band *sband;
1064 struct ieee80211_channel *chan; 1075 struct ieee80211_channel *chan;
1076 bool channel_changed = false;
1077 struct ieee80211_channel chan_before;
1065 1078
1066 assert_cfg80211_lock(); 1079 assert_cfg80211_lock();
1067 1080
@@ -1071,18 +1084,28 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1071 if (likely(chan->center_freq != reg_beacon->chan.center_freq)) 1084 if (likely(chan->center_freq != reg_beacon->chan.center_freq))
1072 return; 1085 return;
1073 1086
1074 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) { 1087 if (chan->beacon_found)
1088 return;
1089
1090 chan->beacon_found = true;
1091
1092 chan_before.center_freq = chan->center_freq;
1093 chan_before.flags = chan->flags;
1094
1095 if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
1096 !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1075 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; 1097 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
1076 REG_DEBUG_BEACON_FLAG("active scanning"); 1098 channel_changed = true;
1077 } 1099 }
1078 1100
1079 if (chan->flags & IEEE80211_CHAN_NO_IBSS) { 1101 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
1102 !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) {
1080 chan->flags &= ~IEEE80211_CHAN_NO_IBSS; 1103 chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
1081 REG_DEBUG_BEACON_FLAG("beaconing"); 1104 channel_changed = true;
1082 } 1105 }
1083 1106
1084 chan->beacon_found = true; 1107 if (channel_changed)
1085#undef REG_DEBUG_BEACON_FLAG 1108 nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
1086} 1109}
1087 1110
1088/* 1111/*
@@ -1155,6 +1178,93 @@ static void reg_process_beacons(struct wiphy *wiphy)
1155 wiphy_update_beacon_reg(wiphy); 1178 wiphy_update_beacon_reg(wiphy);
1156} 1179}
1157 1180
1181static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
1182{
1183 if (!chan)
1184 return true;
1185 if (chan->flags & IEEE80211_CHAN_DISABLED)
1186 return true;
1187 /* This would happen when regulatory rules disallow HT40 completely */
1188 if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40)))
1189 return true;
1190 return false;
1191}
1192
1193static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1194 enum ieee80211_band band,
1195 unsigned int chan_idx)
1196{
1197 struct ieee80211_supported_band *sband;
1198 struct ieee80211_channel *channel;
1199 struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
1200 unsigned int i;
1201
1202 assert_cfg80211_lock();
1203
1204 sband = wiphy->bands[band];
1205 BUG_ON(chan_idx >= sband->n_channels);
1206 channel = &sband->channels[chan_idx];
1207
1208 if (is_ht40_not_allowed(channel)) {
1209 channel->flags |= IEEE80211_CHAN_NO_HT40;
1210 return;
1211 }
1212
1213 /*
1214 * We need to ensure the extension channels exist to
1215 * be able to use HT40- or HT40+, this finds them (or not)
1216 */
1217 for (i = 0; i < sband->n_channels; i++) {
1218 struct ieee80211_channel *c = &sband->channels[i];
1219 if (c->center_freq == (channel->center_freq - 20))
1220 channel_before = c;
1221 if (c->center_freq == (channel->center_freq + 20))
1222 channel_after = c;
1223 }
1224
1225 /*
 1226	 * Please note that this assumes the target bandwidth is 20 MHz;
 1227	 * if that ever changes we will also need to update the logic
 1228	 * below to account for it.
1229 */
1230 if (is_ht40_not_allowed(channel_before))
1231 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
1232 else
1233 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
1234
1235 if (is_ht40_not_allowed(channel_after))
1236 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
1237 else
1238 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
1239}
1240
1241static void reg_process_ht_flags_band(struct wiphy *wiphy,
1242 enum ieee80211_band band)
1243{
1244 unsigned int i;
1245 struct ieee80211_supported_band *sband;
1246
1247 BUG_ON(!wiphy->bands[band]);
1248 sband = wiphy->bands[band];
1249
1250 for (i = 0; i < sband->n_channels; i++)
1251 reg_process_ht_flags_channel(wiphy, band, i);
1252}
1253
1254static void reg_process_ht_flags(struct wiphy *wiphy)
1255{
1256 enum ieee80211_band band;
1257
1258 if (!wiphy)
1259 return;
1260
1261 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1262 if (wiphy->bands[band])
1263 reg_process_ht_flags_band(wiphy, band);
1264 }
1265
1266}
1267
1158void wiphy_update_regulatory(struct wiphy *wiphy, 1268void wiphy_update_regulatory(struct wiphy *wiphy,
1159 enum nl80211_reg_initiator initiator) 1269 enum nl80211_reg_initiator initiator)
1160{ 1270{
@@ -1168,6 +1278,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
1168 } 1278 }
1169out: 1279out:
1170 reg_process_beacons(wiphy); 1280 reg_process_beacons(wiphy);
1281 reg_process_ht_flags(wiphy);
1171 if (wiphy->reg_notifier) 1282 if (wiphy->reg_notifier)
1172 wiphy->reg_notifier(wiphy, last_request); 1283 wiphy->reg_notifier(wiphy, last_request);
1173} 1284}
@@ -1178,9 +1289,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1178 const struct ieee80211_regdomain *regd) 1289 const struct ieee80211_regdomain *regd)
1179{ 1290{
1180 int r; 1291 int r;
1181 u32 max_bandwidth = 0; 1292 u32 desired_bw_khz = MHZ_TO_KHZ(20);
1293 u32 bw_flags = 0;
1182 const struct ieee80211_reg_rule *reg_rule = NULL; 1294 const struct ieee80211_reg_rule *reg_rule = NULL;
1183 const struct ieee80211_power_rule *power_rule = NULL; 1295 const struct ieee80211_power_rule *power_rule = NULL;
1296 const struct ieee80211_freq_range *freq_range = NULL;
1184 struct ieee80211_supported_band *sband; 1297 struct ieee80211_supported_band *sband;
1185 struct ieee80211_channel *chan; 1298 struct ieee80211_channel *chan;
1186 1299
@@ -1190,8 +1303,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1190 BUG_ON(chan_idx >= sband->n_channels); 1303 BUG_ON(chan_idx >= sband->n_channels);
1191 chan = &sband->channels[chan_idx]; 1304 chan = &sband->channels[chan_idx];
1192 1305
1193 r = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq), 1306 r = freq_reg_info_regd(wiphy,
1194 &max_bandwidth, &reg_rule, regd); 1307 MHZ_TO_KHZ(chan->center_freq),
1308 desired_bw_khz,
1309 &reg_rule,
1310 regd);
1195 1311
1196 if (r) { 1312 if (r) {
1197 chan->flags = IEEE80211_CHAN_DISABLED; 1313 chan->flags = IEEE80211_CHAN_DISABLED;
@@ -1199,10 +1315,14 @@ static void handle_channel_custom(struct wiphy *wiphy,
1199 } 1315 }
1200 1316
1201 power_rule = &reg_rule->power_rule; 1317 power_rule = &reg_rule->power_rule;
1318 freq_range = &reg_rule->freq_range;
1202 1319
1203 chan->flags |= map_regdom_flags(reg_rule->flags); 1320 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1321 bw_flags = IEEE80211_CHAN_NO_HT40;
1322
1323 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1204 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1324 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1205 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1325 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1206 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 1326 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
1207} 1327}
1208 1328
@@ -1224,13 +1344,22 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1224 const struct ieee80211_regdomain *regd) 1344 const struct ieee80211_regdomain *regd)
1225{ 1345{
1226 enum ieee80211_band band; 1346 enum ieee80211_band band;
1347 unsigned int bands_set = 0;
1227 1348
1228 mutex_lock(&cfg80211_mutex); 1349 mutex_lock(&cfg80211_mutex);
1229 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1350 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1230 if (wiphy->bands[band]) 1351 if (!wiphy->bands[band])
1231 handle_band_custom(wiphy, band, regd); 1352 continue;
1353 handle_band_custom(wiphy, band, regd);
1354 bands_set++;
1232 } 1355 }
1233 mutex_unlock(&cfg80211_mutex); 1356 mutex_unlock(&cfg80211_mutex);
1357
1358 /*
1359 * no point in calling this if it won't have any effect
 1360	 * on your device's supported bands.
1361 */
1362 WARN_ON(!bands_set);
1234} 1363}
1235EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1364EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1236 1365
@@ -2119,14 +2248,14 @@ void reg_device_remove(struct wiphy *wiphy)
2119 2248
2120 assert_cfg80211_lock(); 2249 assert_cfg80211_lock();
2121 2250
2251 kfree(wiphy->regd);
2252
2122 if (last_request) 2253 if (last_request)
2123 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2254 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2124 2255
2125 kfree(wiphy->regd); 2256 if (!request_wiphy || request_wiphy != wiphy)
2126 if (!last_request || !request_wiphy)
2127 return;
2128 if (request_wiphy != wiphy)
2129 return; 2257 return;
2258
2130 last_request->wiphy_idx = WIPHY_IDX_STALE; 2259 last_request->wiphy_idx = WIPHY_IDX_STALE;
2131 last_request->country_ie_env = ENVIRON_ANY; 2260 last_request->country_ie_env = ENVIRON_ANY;
2132} 2261}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 1f260c40b6ca..df59440290e5 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -377,18 +377,16 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
377 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 377 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
378 size_t ielen = res->pub.len_information_elements; 378 size_t ielen = res->pub.len_information_elements;
379 379
380 if (ksize(found) >= used + ielen) { 380 if (!found->ies_allocated && ksize(found) >= used + ielen) {
381 memcpy(found->pub.information_elements, 381 memcpy(found->pub.information_elements,
382 res->pub.information_elements, ielen); 382 res->pub.information_elements, ielen);
383 found->pub.len_information_elements = ielen; 383 found->pub.len_information_elements = ielen;
384 } else { 384 } else {
385 u8 *ies = found->pub.information_elements; 385 u8 *ies = found->pub.information_elements;
386 386
387 if (found->ies_allocated) { 387 if (found->ies_allocated)
388 if (ksize(ies) < ielen) 388 ies = krealloc(ies, ielen, GFP_ATOMIC);
389 ies = krealloc(ies, ielen, 389 else
390 GFP_ATOMIC);
391 } else
392 ies = kmalloc(ielen, GFP_ATOMIC); 390 ies = kmalloc(ielen, GFP_ATOMIC);
393 391
394 if (ies) { 392 if (ies) {
@@ -415,6 +413,55 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
415 return found; 413 return found;
416} 414}
417 415
416struct cfg80211_bss*
417cfg80211_inform_bss(struct wiphy *wiphy,
418 struct ieee80211_channel *channel,
419 const u8 *bssid,
420 u64 timestamp, u16 capability, u16 beacon_interval,
421 const u8 *ie, size_t ielen,
422 s32 signal, gfp_t gfp)
423{
424 struct cfg80211_internal_bss *res;
425 size_t privsz;
426
427 if (WARN_ON(!wiphy))
428 return NULL;
429
430 privsz = wiphy->bss_priv_size;
431
432 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
433 (signal < 0 || signal > 100)))
434 return NULL;
435
436 res = kzalloc(sizeof(*res) + privsz + ielen, gfp);
437 if (!res)
438 return NULL;
439
440 memcpy(res->pub.bssid, bssid, ETH_ALEN);
441 res->pub.channel = channel;
442 res->pub.signal = signal;
443 res->pub.tsf = timestamp;
444 res->pub.beacon_interval = beacon_interval;
445 res->pub.capability = capability;
446 /* point to after the private area */
447 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
448 memcpy(res->pub.information_elements, ie, ielen);
449 res->pub.len_information_elements = ielen;
450
451 kref_init(&res->ref);
452
453 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0);
454 if (!res)
455 return NULL;
456
457 if (res->pub.capability & WLAN_CAPABILITY_ESS)
458 regulatory_hint_found_beacon(wiphy, channel, gfp);
459
460 /* cfg80211_bss_update gives us a referenced result */
461 return &res->pub;
462}
463EXPORT_SYMBOL(cfg80211_inform_bss);
464
418struct cfg80211_bss * 465struct cfg80211_bss *
419cfg80211_inform_bss_frame(struct wiphy *wiphy, 466cfg80211_inform_bss_frame(struct wiphy *wiphy,
420 struct ieee80211_channel *channel, 467 struct ieee80211_channel *channel,
@@ -605,7 +652,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
605 cfg80211_put_dev(rdev); 652 cfg80211_put_dev(rdev);
606 return err; 653 return err;
607} 654}
608EXPORT_SYMBOL(cfg80211_wext_siwscan); 655EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
609 656
610static void ieee80211_scan_add_ies(struct iw_request_info *info, 657static void ieee80211_scan_add_ies(struct iw_request_info *info,
611 struct cfg80211_bss *bss, 658 struct cfg80211_bss *bss,
@@ -914,5 +961,5 @@ int cfg80211_wext_giwscan(struct net_device *dev,
914 cfg80211_put_dev(rdev); 961 cfg80211_put_dev(rdev);
915 return res; 962 return res;
916} 963}
917EXPORT_SYMBOL(cfg80211_wext_giwscan); 964EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
918#endif 965#endif
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 487cdd9bcffc..d072bff463aa 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1,10 +1,12 @@
1/* 1/*
2 * Wireless utility functions 2 * Wireless utility functions
3 * 3 *
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#include <net/wireless.h> 6#include <linux/bitops.h>
7#include <asm/bitops.h> 7#include <linux/etherdevice.h>
8#include <net/cfg80211.h>
9#include <net/ip.h>
8#include "core.h" 10#include "core.h"
9 11
10struct ieee80211_rate * 12struct ieee80211_rate *
@@ -138,3 +140,366 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
138 if (wiphy->bands[band]) 140 if (wiphy->bands[band])
139 set_mandatory_flags_band(wiphy->bands[band], band); 141 set_mandatory_flags_band(wiphy->bands[band], band);
140} 142}
143
144int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
145 const u8 *mac_addr)
146{
147 if (key_idx > 5)
148 return -EINVAL;
149
150 /*
151 * Disallow pairwise keys with non-zero index unless it's WEP
152 * (because current deployments use pairwise WEP keys with
 153	 * non-zero indices but 802.11i clearly specifies to use zero)
154 */
155 if (mac_addr && key_idx &&
156 params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
157 params->cipher != WLAN_CIPHER_SUITE_WEP104)
158 return -EINVAL;
159
160 /* TODO: add definitions for the lengths to linux/ieee80211.h */
161 switch (params->cipher) {
162 case WLAN_CIPHER_SUITE_WEP40:
163 if (params->key_len != 5)
164 return -EINVAL;
165 break;
166 case WLAN_CIPHER_SUITE_TKIP:
167 if (params->key_len != 32)
168 return -EINVAL;
169 break;
170 case WLAN_CIPHER_SUITE_CCMP:
171 if (params->key_len != 16)
172 return -EINVAL;
173 break;
174 case WLAN_CIPHER_SUITE_WEP104:
175 if (params->key_len != 13)
176 return -EINVAL;
177 break;
178 case WLAN_CIPHER_SUITE_AES_CMAC:
179 if (params->key_len != 16)
180 return -EINVAL;
181 break;
182 default:
183 return -EINVAL;
184 }
185
186 if (params->seq) {
187 switch (params->cipher) {
188 case WLAN_CIPHER_SUITE_WEP40:
189 case WLAN_CIPHER_SUITE_WEP104:
190 /* These ciphers do not use key sequence */
191 return -EINVAL;
192 case WLAN_CIPHER_SUITE_TKIP:
193 case WLAN_CIPHER_SUITE_CCMP:
194 case WLAN_CIPHER_SUITE_AES_CMAC:
195 if (params->seq_len != 6)
196 return -EINVAL;
197 break;
198 }
199 }
200
201 return 0;
202}
203
204/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
205/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
206const unsigned char rfc1042_header[] __aligned(2) =
207 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
208EXPORT_SYMBOL(rfc1042_header);
209
210/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
211const unsigned char bridge_tunnel_header[] __aligned(2) =
212 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
213EXPORT_SYMBOL(bridge_tunnel_header);
214
215unsigned int ieee80211_hdrlen(__le16 fc)
216{
217 unsigned int hdrlen = 24;
218
219 if (ieee80211_is_data(fc)) {
220 if (ieee80211_has_a4(fc))
221 hdrlen = 30;
222 if (ieee80211_is_data_qos(fc))
223 hdrlen += IEEE80211_QOS_CTL_LEN;
224 goto out;
225 }
226
227 if (ieee80211_is_ctl(fc)) {
228 /*
229 * ACK and CTS are 10 bytes, all others 16. To see how
230 * to get this condition consider
231 * subtype mask: 0b0000000011110000 (0x00F0)
232 * ACK subtype: 0b0000000011010000 (0x00D0)
233 * CTS subtype: 0b0000000011000000 (0x00C0)
234 * bits that matter: ^^^ (0x00E0)
235 * value of those: 0b0000000011000000 (0x00C0)
236 */
237 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
238 hdrlen = 10;
239 else
240 hdrlen = 16;
241 }
242out:
243 return hdrlen;
244}
245EXPORT_SYMBOL(ieee80211_hdrlen);
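
The control-frame shortcut in ieee80211_hdrlen() works because ACK and CTS are the only control subtypes whose upper three subtype bits equal 0b110. Plugging in the values from the comment (the RTS line is an added illustration):

	/* ACK: 0x00D0 & 0x00E0 == 0x00C0 -> 10-byte header (FC, duration, RA) */
	/* CTS: 0x00C0 & 0x00E0 == 0x00C0 -> 10-byte header                    */
	/* RTS: 0x00B0 & 0x00E0 == 0x00A0 -> falls through to the 16-byte case */
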
246
247unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
248{
249 const struct ieee80211_hdr *hdr =
250 (const struct ieee80211_hdr *)skb->data;
251 unsigned int hdrlen;
252
253 if (unlikely(skb->len < 10))
254 return 0;
255 hdrlen = ieee80211_hdrlen(hdr->frame_control);
256 if (unlikely(hdrlen > skb->len))
257 return 0;
258 return hdrlen;
259}
260EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
261
262int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
263{
264 int ae = meshhdr->flags & MESH_FLAGS_AE;
265 /* 7.1.3.5a.2 */
266 switch (ae) {
267 case 0:
268 return 6;
269 case 1:
270 return 12;
271 case 2:
272 return 18;
273 case 3:
274 return 24;
275 default:
276 return 6;
277 }
278}
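
The lengths returned by ieee80211_get_mesh_hdrlen() follow from the fixed 6-byte mesh control field (flags, TTL and a 4-byte sequence number) plus one 6-byte extended address per step of the AE field, e.g.:

	/* AE == 0: 1 + 1 + 4                =  6 bytes */
	/* AE == 2: 1 + 1 + 4 + 2 * ETH_ALEN = 18 bytes */
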
279
280int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
281 enum nl80211_iftype iftype)
282{
283 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
284 u16 hdrlen, ethertype;
285 u8 *payload;
286 u8 dst[ETH_ALEN];
287 u8 src[ETH_ALEN] __aligned(2);
288
289 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
290 return -1;
291
292 hdrlen = ieee80211_hdrlen(hdr->frame_control);
293
294 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
295 * header
296 * IEEE 802.11 address fields:
297 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
298 * 0 0 DA SA BSSID n/a
299 * 0 1 DA BSSID SA n/a
300 * 1 0 BSSID SA DA n/a
301 * 1 1 RA TA DA SA
302 */
303 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
304 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
305
306 switch (hdr->frame_control &
307 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
308 case cpu_to_le16(IEEE80211_FCTL_TODS):
309 if (unlikely(iftype != NL80211_IFTYPE_AP &&
310 iftype != NL80211_IFTYPE_AP_VLAN))
311 return -1;
312 break;
313 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
314 if (unlikely(iftype != NL80211_IFTYPE_WDS &&
315 iftype != NL80211_IFTYPE_MESH_POINT))
316 return -1;
317 if (iftype == NL80211_IFTYPE_MESH_POINT) {
318 struct ieee80211s_hdr *meshdr =
319 (struct ieee80211s_hdr *) (skb->data + hdrlen);
320 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
321 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
322 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
323 memcpy(src, meshdr->eaddr2, ETH_ALEN);
324 }
325 }
326 break;
327 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
328 if (iftype != NL80211_IFTYPE_STATION ||
329 (is_multicast_ether_addr(dst) &&
330 !compare_ether_addr(src, addr)))
331 return -1;
332 break;
333 case cpu_to_le16(0):
334 if (iftype != NL80211_IFTYPE_ADHOC)
335 return -1;
336 break;
337 }
338
339 if (unlikely(skb->len - hdrlen < 8))
340 return -1;
341
342 payload = skb->data + hdrlen;
343 ethertype = (payload[6] << 8) | payload[7];
344
345 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
346 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
347 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
348 /* remove RFC1042 or Bridge-Tunnel encapsulation and
349 * replace EtherType */
350 skb_pull(skb, hdrlen + 6);
351 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
352 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
353 } else {
354 struct ethhdr *ehdr;
355 __be16 len;
356
357 skb_pull(skb, hdrlen);
358 len = htons(skb->len);
359 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
360 memcpy(ehdr->h_dest, dst, ETH_ALEN);
361 memcpy(ehdr->h_source, src, ETH_ALEN);
362 ehdr->h_proto = len;
363 }
364 return 0;
365}
366EXPORT_SYMBOL(ieee80211_data_to_8023);
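
The decapsulation rule in ieee80211_data_to_8023() is: strip the 8-byte LLC/SNAP prefix and keep the EtherType when the payload starts with the RFC 1042 header (unless the EtherType is AARP or IPX) or with the bridge-tunnel header; otherwise leave the payload alone and let the new Ethernet header carry a length field. A user-space sketch of just that decision, not part of this patch (the EtherType constants are assumptions matching ETH_P_AARP/ETH_P_IPX):

#include <stdint.h>
#include <string.h>

static const uint8_t rfc1042_hdr[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

#define ETHERTYPE_AARP 0x80F3	/* assumed value of ETH_P_AARP */
#define ETHERTYPE_IPX  0x8137	/* assumed value of ETH_P_IPX */

/* Return 1 when the 8-byte LLC/SNAP prefix should be stripped so the
 * EtherType that follows it becomes the Ethernet type field. */
static int strip_llc_snap(const uint8_t *payload, size_t len)
{
	uint16_t ethertype;

	if (len < 8)
		return 0;
	ethertype = (payload[6] << 8) | payload[7];

	if (memcmp(payload, rfc1042_hdr, 6) == 0 &&
	    ethertype != ETHERTYPE_AARP && ethertype != ETHERTYPE_IPX)
		return 1;
	return memcmp(payload, bridge_tunnel_hdr, 6) == 0;
}
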
367
368int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
369 enum nl80211_iftype iftype, u8 *bssid, bool qos)
370{
371 struct ieee80211_hdr hdr;
372 u16 hdrlen, ethertype;
373 __le16 fc;
374 const u8 *encaps_data;
375 int encaps_len, skip_header_bytes;
376 int nh_pos, h_pos;
377 int head_need;
378
379 if (unlikely(skb->len < ETH_HLEN))
380 return -EINVAL;
381
382 nh_pos = skb_network_header(skb) - skb->data;
383 h_pos = skb_transport_header(skb) - skb->data;
384
385 /* convert Ethernet header to proper 802.11 header (based on
386 * operation mode) */
387 ethertype = (skb->data[12] << 8) | skb->data[13];
388 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
389
390 switch (iftype) {
391 case NL80211_IFTYPE_AP:
392 case NL80211_IFTYPE_AP_VLAN:
393 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
394 /* DA BSSID SA */
395 memcpy(hdr.addr1, skb->data, ETH_ALEN);
396 memcpy(hdr.addr2, addr, ETH_ALEN);
397 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
398 hdrlen = 24;
399 break;
400 case NL80211_IFTYPE_STATION:
401 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
402 /* BSSID SA DA */
403 memcpy(hdr.addr1, bssid, ETH_ALEN);
404 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
405 memcpy(hdr.addr3, skb->data, ETH_ALEN);
406 hdrlen = 24;
407 break;
408 case NL80211_IFTYPE_ADHOC:
409 /* DA SA BSSID */
410 memcpy(hdr.addr1, skb->data, ETH_ALEN);
411 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
412 memcpy(hdr.addr3, bssid, ETH_ALEN);
413 hdrlen = 24;
414 break;
415 default:
416 return -EOPNOTSUPP;
417 }
418
419 if (qos) {
420 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
421 hdrlen += 2;
422 }
423
424 hdr.frame_control = fc;
425 hdr.duration_id = 0;
426 hdr.seq_ctrl = 0;
427
428 skip_header_bytes = ETH_HLEN;
429 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
430 encaps_data = bridge_tunnel_header;
431 encaps_len = sizeof(bridge_tunnel_header);
432 skip_header_bytes -= 2;
433 } else if (ethertype > 0x600) {
434 encaps_data = rfc1042_header;
435 encaps_len = sizeof(rfc1042_header);
436 skip_header_bytes -= 2;
437 } else {
438 encaps_data = NULL;
439 encaps_len = 0;
440 }
441
442 skb_pull(skb, skip_header_bytes);
443 nh_pos -= skip_header_bytes;
444 h_pos -= skip_header_bytes;
445
446 head_need = hdrlen + encaps_len - skb_headroom(skb);
447
448 if (head_need > 0 || skb_cloned(skb)) {
449 head_need = max(head_need, 0);
450 if (head_need)
451 skb_orphan(skb);
452
453 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
454 printk(KERN_ERR "failed to reallocate Tx buffer\n");
455 return -ENOMEM;
456 }
457 skb->truesize += head_need;
458 }
459
460 if (encaps_data) {
461 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
462 nh_pos += encaps_len;
463 h_pos += encaps_len;
464 }
465
466 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
467
468 nh_pos += hdrlen;
469 h_pos += hdrlen;
470
471 /* Update skb pointers to various headers since this modified frame
472 * is going to go through Linux networking code that may potentially
473 * need things like pointer to IP header. */
474 skb_set_mac_header(skb, 0);
475 skb_set_network_header(skb, nh_pos);
476 skb_set_transport_header(skb, h_pos);
477
478 return 0;
479}
480EXPORT_SYMBOL(ieee80211_data_from_8023);
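
On the transmit side ieee80211_data_from_8023() makes the mirror-image choice: AARP and IPX get the bridge-tunnel SNAP header, any other EtherType above 0x600 gets the RFC 1042 header, and 802.3 length-field frames get no SNAP header at all. A minimal self-contained sketch of that selection, not part of this patch:

#include <stdint.h>

static const uint8_t rfc1042[6]   = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t br_tunnel[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

#define ETHERTYPE_AARP 0x80F3
#define ETHERTYPE_IPX  0x8137

/* Choose the SNAP header to prepend for an outgoing EtherType; NULL means
 * the frame uses an 802.3 length field and gets no SNAP encapsulation. */
static const uint8_t *pick_encaps(uint16_t ethertype, unsigned int *encaps_len)
{
	if (ethertype == ETHERTYPE_AARP || ethertype == ETHERTYPE_IPX) {
		*encaps_len = sizeof(br_tunnel);
		return br_tunnel;
	}
	if (ethertype > 0x600) {
		*encaps_len = sizeof(rfc1042);
		return rfc1042;
	}
	*encaps_len = 0;
	return NULL;
}
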
481
482/* Given a data frame determine the 802.1p/1d tag to use. */
483unsigned int cfg80211_classify8021d(struct sk_buff *skb)
484{
485 unsigned int dscp;
486
487 /* skb->priority values from 256->263 are magic values to
488 * directly indicate a specific 802.1d priority. This is used
489 * to allow 802.1d priority to be passed directly in from VLAN
490 * tags, etc.
491 */
492 if (skb->priority >= 256 && skb->priority <= 263)
493 return skb->priority - 256;
494
495 switch (skb->protocol) {
496 case htons(ETH_P_IP):
497 dscp = ip_hdr(skb)->tos & 0xfc;
498 break;
499 default:
500 return 0;
501 }
502
503 return dscp >> 5;
504}
505EXPORT_SYMBOL(cfg80211_classify8021d);
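
cfg80211_classify8021d() takes the top three bits of the IPv4 TOS/DSCP byte as the 802.1d tag, with skb->priority values 256-263 acting as a direct override. Worked example: TOS 0xb8 (DSCP 46, EF) gives 0xb8 >> 5 = 5, so the frame is tagged 5. A user-space sketch, not part of this patch:

#include <stdint.h>

/* Map an IPv4 TOS byte to an 802.1d tag the way cfg80211_classify8021d()
 * does: keep the DSCP bits and use the top three (the old IP precedence). */
static unsigned int tos_to_8021d(uint8_t tos)
{
	uint8_t dscp = tos & 0xfc;

	return dscp >> 5;
}

/* tos_to_8021d(0xb8) == 5 (EF), tos_to_8021d(0x00) == 0 (best effort). */
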
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0fd1db6e95bb..711e00a0c9b5 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -5,13 +5,14 @@
  * into cfg80211, when that happens all the exports here go away and
  * we directly assign the wireless handlers of wireless interfaces.
  *
- * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
  */
 
 #include <linux/wireless.h>
 #include <linux/nl80211.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
-#include <net/wireless.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -57,7 +58,7 @@ int cfg80211_wext_giwname(struct net_device *dev,
 
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwname);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwname);
 
 int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 			  u32 *mode, char *extra)
@@ -108,7 +109,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 
 	return ret;
 }
-EXPORT_SYMBOL(cfg80211_wext_siwmode);
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
 
 int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
 			  u32 *mode, char *extra)
@@ -143,7 +144,7 @@ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwmode);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode);
 
 
 int cfg80211_wext_giwrange(struct net_device *dev,
@@ -206,7 +207,6 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
 			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
 
-
 	for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
 		int i;
 		struct ieee80211_supported_band *sband;
@@ -240,4 +240,507 @@ int cfg80211_wext_giwrange(struct net_device *dev,
 
 	return 0;
 }
-EXPORT_SYMBOL(cfg80211_wext_giwrange);
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
244
245int cfg80211_wext_siwmlme(struct net_device *dev,
246 struct iw_request_info *info,
247 struct iw_point *data, char *extra)
248{
249 struct wireless_dev *wdev = dev->ieee80211_ptr;
250 struct iw_mlme *mlme = (struct iw_mlme *)extra;
251 struct cfg80211_registered_device *rdev;
252 union {
253 struct cfg80211_disassoc_request disassoc;
254 struct cfg80211_deauth_request deauth;
255 } cmd;
256
257 if (!wdev)
258 return -EOPNOTSUPP;
259
260 rdev = wiphy_to_dev(wdev->wiphy);
261
262 if (wdev->iftype != NL80211_IFTYPE_STATION)
263 return -EINVAL;
264
265 if (mlme->addr.sa_family != ARPHRD_ETHER)
266 return -EINVAL;
267
268 memset(&cmd, 0, sizeof(cmd));
269
270 switch (mlme->cmd) {
271 case IW_MLME_DEAUTH:
272 if (!rdev->ops->deauth)
273 return -EOPNOTSUPP;
274 cmd.deauth.peer_addr = mlme->addr.sa_data;
275 cmd.deauth.reason_code = mlme->reason_code;
276 return rdev->ops->deauth(wdev->wiphy, dev, &cmd.deauth);
277 case IW_MLME_DISASSOC:
278 if (!rdev->ops->disassoc)
279 return -EOPNOTSUPP;
280 cmd.disassoc.peer_addr = mlme->addr.sa_data;
281 cmd.disassoc.reason_code = mlme->reason_code;
282 return rdev->ops->disassoc(wdev->wiphy, dev, &cmd.disassoc);
283 default:
284 return -EOPNOTSUPP;
285 }
286}
287EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
288
289
290/**
291 * cfg80211_wext_freq - get wext frequency for non-"auto"
292 * @wiphy: the wiphy
293 * @freq: the wext freq encoding
294 *
295 * Returns a channel, %NULL for auto, or an ERR_PTR for errors!
296 */
297struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy,
298 struct iw_freq *freq)
299{
300 struct ieee80211_channel *chan;
301 int f;
302
303 /*
304 * Parse frequency - return NULL for auto and
305 * -EINVAL for impossible things.
306 */
307 if (freq->e == 0) {
308 if (freq->m < 0)
309 return NULL;
310 f = ieee80211_channel_to_frequency(freq->m);
311 } else {
312 int i, div = 1000000;
313 for (i = 0; i < freq->e; i++)
314 div /= 10;
315 if (div <= 0)
316 return ERR_PTR(-EINVAL);
317 f = freq->m / div;
318 }
319
320 /*
321 * Look up channel struct and return -EINVAL when
322 * it cannot be found.
323 */
324 chan = ieee80211_get_channel(wiphy, f);
325 if (!chan)
326 return ERR_PTR(-EINVAL);
327 return chan;
328}
329EXPORT_SYMBOL_GPL(cfg80211_wext_freq);
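
cfg80211_wext_freq() accepts both encodings allowed for struct iw_freq: with e == 0 the mantissa m is a channel number (negative meaning auto), otherwise m scaled by 10^e is a frequency and the code reduces it to MHz by dividing by 10^(6 - e). For example {m = 6, e = 0} means channel 6, while {m = 2437, e = 6} and {m = 243700000, e = 1} both mean 2437 MHz. A user-space sketch of the scaling step only, not part of this patch:

#include <stdio.h>

/* Convert a wext (m, e) frequency encoding to MHz; -1 when the exponent is
 * out of range. The e == 0 "m is a channel number" case is handled
 * separately by cfg80211_wext_freq(). */
static int wext_freq_to_mhz(int m, int e)
{
	int i, div = 1000000;

	for (i = 0; i < e; i++)
		div /= 10;
	if (div <= 0)
		return -1;
	return m / div;
}

int main(void)
{
	printf("%d\n", wext_freq_to_mhz(2437, 6));	/* 2437 */
	printf("%d\n", wext_freq_to_mhz(243700000, 1));	/* 2437 */
	return 0;
}
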
330
331int cfg80211_wext_siwrts(struct net_device *dev,
332 struct iw_request_info *info,
333 struct iw_param *rts, char *extra)
334{
335 struct wireless_dev *wdev = dev->ieee80211_ptr;
336 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
337 u32 orts = wdev->wiphy->rts_threshold;
338 int err;
339
340 if (rts->disabled || !rts->fixed)
341 wdev->wiphy->rts_threshold = (u32) -1;
342 else if (rts->value < 0)
343 return -EINVAL;
344 else
345 wdev->wiphy->rts_threshold = rts->value;
346
347 err = rdev->ops->set_wiphy_params(wdev->wiphy,
348 WIPHY_PARAM_RTS_THRESHOLD);
349 if (err)
350 wdev->wiphy->rts_threshold = orts;
351
352 return err;
353}
354EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts);
355
356int cfg80211_wext_giwrts(struct net_device *dev,
357 struct iw_request_info *info,
358 struct iw_param *rts, char *extra)
359{
360 struct wireless_dev *wdev = dev->ieee80211_ptr;
361
362 rts->value = wdev->wiphy->rts_threshold;
363 rts->disabled = rts->value == (u32) -1;
364 rts->fixed = 1;
365
366 return 0;
367}
368EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts);
369
370int cfg80211_wext_siwfrag(struct net_device *dev,
371 struct iw_request_info *info,
372 struct iw_param *frag, char *extra)
373{
374 struct wireless_dev *wdev = dev->ieee80211_ptr;
375 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
376 u32 ofrag = wdev->wiphy->frag_threshold;
377 int err;
378
379 if (frag->disabled || !frag->fixed)
380 wdev->wiphy->frag_threshold = (u32) -1;
381 else if (frag->value < 256)
382 return -EINVAL;
383 else {
384 /* Fragment length must be even, so strip LSB. */
385 wdev->wiphy->frag_threshold = frag->value & ~0x1;
386 }
387
388 err = rdev->ops->set_wiphy_params(wdev->wiphy,
389 WIPHY_PARAM_FRAG_THRESHOLD);
390 if (err)
391 wdev->wiphy->frag_threshold = ofrag;
392
393 return err;
394}
395EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag);
396
397int cfg80211_wext_giwfrag(struct net_device *dev,
398 struct iw_request_info *info,
399 struct iw_param *frag, char *extra)
400{
401 struct wireless_dev *wdev = dev->ieee80211_ptr;
402
403 frag->value = wdev->wiphy->frag_threshold;
404 frag->disabled = frag->value == (u32) -1;
405 frag->fixed = 1;
406
407 return 0;
408}
409EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
410
411int cfg80211_wext_siwretry(struct net_device *dev,
412 struct iw_request_info *info,
413 struct iw_param *retry, char *extra)
414{
415 struct wireless_dev *wdev = dev->ieee80211_ptr;
416 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
417 u32 changed = 0;
418 u8 olong = wdev->wiphy->retry_long;
419 u8 oshort = wdev->wiphy->retry_short;
420 int err;
421
422 if (retry->disabled ||
423 (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
424 return -EINVAL;
425
426 if (retry->flags & IW_RETRY_LONG) {
427 wdev->wiphy->retry_long = retry->value;
428 changed |= WIPHY_PARAM_RETRY_LONG;
429 } else if (retry->flags & IW_RETRY_SHORT) {
430 wdev->wiphy->retry_short = retry->value;
431 changed |= WIPHY_PARAM_RETRY_SHORT;
432 } else {
433 wdev->wiphy->retry_short = retry->value;
434 wdev->wiphy->retry_long = retry->value;
435 changed |= WIPHY_PARAM_RETRY_LONG;
436 changed |= WIPHY_PARAM_RETRY_SHORT;
437 }
438
439 if (!changed)
440 return 0;
441
442 err = rdev->ops->set_wiphy_params(wdev->wiphy, changed);
443 if (err) {
444 wdev->wiphy->retry_short = oshort;
445 wdev->wiphy->retry_long = olong;
446 }
447
448 return err;
449}
450EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);
451
452int cfg80211_wext_giwretry(struct net_device *dev,
453 struct iw_request_info *info,
454 struct iw_param *retry, char *extra)
455{
456 struct wireless_dev *wdev = dev->ieee80211_ptr;
457
458 retry->disabled = 0;
459
460 if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) {
461 /*
462 * First return short value, iwconfig will ask long value
463 * later if needed
464 */
465 retry->flags |= IW_RETRY_LIMIT;
466 retry->value = wdev->wiphy->retry_short;
467 if (wdev->wiphy->retry_long != wdev->wiphy->retry_short)
468 retry->flags |= IW_RETRY_LONG;
469
470 return 0;
471 }
472
473 if (retry->flags & IW_RETRY_LONG) {
474 retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
475 retry->value = wdev->wiphy->retry_long;
476 }
477
478 return 0;
479}
480EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
481
482static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
483 struct net_device *dev, const u8 *addr,
484 bool remove, bool tx_key, int idx,
485 struct key_params *params)
486{
487 struct wireless_dev *wdev = dev->ieee80211_ptr;
488 int err;
489
490 if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
491 if (!rdev->ops->set_default_mgmt_key)
492 return -EOPNOTSUPP;
493
494 if (idx < 4 || idx > 5)
495 return -EINVAL;
496 } else if (idx < 0 || idx > 3)
497 return -EINVAL;
498
499 if (remove) {
500 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
501 if (!err) {
502 if (idx == wdev->wext.default_key)
503 wdev->wext.default_key = -1;
504 else if (idx == wdev->wext.default_mgmt_key)
505 wdev->wext.default_mgmt_key = -1;
506 }
507 /*
508 * Applications using wireless extensions expect to be
509 * able to delete keys that don't exist, so allow that.
510 */
511 if (err == -ENOENT)
512 return 0;
513
514 return err;
515 } else {
516 if (addr)
517 tx_key = false;
518
519 if (cfg80211_validate_key_settings(params, idx, addr))
520 return -EINVAL;
521
522 err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params);
523 if (err)
524 return err;
525
526 if (tx_key || (!addr && wdev->wext.default_key == -1)) {
527 err = rdev->ops->set_default_key(&rdev->wiphy,
528 dev, idx);
529 if (!err)
530 wdev->wext.default_key = idx;
531 return err;
532 }
533
534 if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
535 (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
536 err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
537 dev, idx);
538 if (!err)
539 wdev->wext.default_mgmt_key = idx;
540 return err;
541 }
542
543 return 0;
544 }
545}
546
547int cfg80211_wext_siwencode(struct net_device *dev,
548 struct iw_request_info *info,
549 struct iw_point *erq, char *keybuf)
550{
551 struct wireless_dev *wdev = dev->ieee80211_ptr;
552 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
553 int idx, err;
554 bool remove = false;
555 struct key_params params;
556
557 /* no use -- only MFP (set_default_mgmt_key) is optional */
558 if (!rdev->ops->del_key ||
559 !rdev->ops->add_key ||
560 !rdev->ops->set_default_key)
561 return -EOPNOTSUPP;
562
563 idx = erq->flags & IW_ENCODE_INDEX;
564 if (idx == 0) {
565 idx = wdev->wext.default_key;
566 if (idx < 0)
567 idx = 0;
568 } else if (idx < 1 || idx > 4)
569 return -EINVAL;
570 else
571 idx--;
572
573 if (erq->flags & IW_ENCODE_DISABLED)
574 remove = true;
575 else if (erq->length == 0) {
576 /* No key data - just set the default TX key index */
577 err = rdev->ops->set_default_key(&rdev->wiphy, dev, idx);
578 if (!err)
579 wdev->wext.default_key = idx;
580 return err;
581 }
582
583 memset(&params, 0, sizeof(params));
584 params.key = keybuf;
585 params.key_len = erq->length;
586 if (erq->length == 5)
587 params.cipher = WLAN_CIPHER_SUITE_WEP40;
588 else if (erq->length == 13)
589 params.cipher = WLAN_CIPHER_SUITE_WEP104;
590 else if (!remove)
591 return -EINVAL;
592
593 return cfg80211_set_encryption(rdev, dev, NULL, remove,
594 wdev->wext.default_key == -1,
595 idx, &params);
596}
597EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);
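
The index handling above follows the wireless-extensions convention that key indices in the ioctl are 1-based, with 0 meaning "use the current default key", while cfg80211 uses 0-based indices (0-3 for pairwise/group keys, 4-5 for the MFP IGTK as enforced in cfg80211_set_encryption()). A sketch of that 1-based-to-0-based step as a hypothetical helper, not kernel code:

/* Translate a 1-based wext WEP key index (1..4) to a 0-based cfg80211
 * index (0..3). Index 0 selects the current default key; out-of-range
 * values are rejected with -1 standing in for -EINVAL. */
static int map_wep_key_idx(int wext_idx, int current_default)
{
	if (wext_idx == 0)
		return current_default >= 0 ? current_default : 0;
	if (wext_idx < 1 || wext_idx > 4)
		return -1;
	return wext_idx - 1;
}
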
598
599int cfg80211_wext_siwencodeext(struct net_device *dev,
600 struct iw_request_info *info,
601 struct iw_point *erq, char *extra)
602{
603 struct wireless_dev *wdev = dev->ieee80211_ptr;
604 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
605 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
606 const u8 *addr;
607 int idx;
608 bool remove = false;
609 struct key_params params;
610 u32 cipher;
611
612 /* no use -- only MFP (set_default_mgmt_key) is optional */
613 if (!rdev->ops->del_key ||
614 !rdev->ops->add_key ||
615 !rdev->ops->set_default_key)
616 return -EOPNOTSUPP;
617
618 switch (ext->alg) {
619 case IW_ENCODE_ALG_NONE:
620 remove = true;
621 cipher = 0;
622 break;
623 case IW_ENCODE_ALG_WEP:
624 if (ext->key_len == 5)
625 cipher = WLAN_CIPHER_SUITE_WEP40;
626 else if (ext->key_len == 13)
627 cipher = WLAN_CIPHER_SUITE_WEP104;
628 else
629 return -EINVAL;
630 break;
631 case IW_ENCODE_ALG_TKIP:
632 cipher = WLAN_CIPHER_SUITE_TKIP;
633 break;
634 case IW_ENCODE_ALG_CCMP:
635 cipher = WLAN_CIPHER_SUITE_CCMP;
636 break;
637 case IW_ENCODE_ALG_AES_CMAC:
638 cipher = WLAN_CIPHER_SUITE_AES_CMAC;
639 break;
640 default:
641 return -EOPNOTSUPP;
642 }
643
644 if (erq->flags & IW_ENCODE_DISABLED)
645 remove = true;
646
647 idx = erq->flags & IW_ENCODE_INDEX;
648 if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
649 if (idx < 4 || idx > 5) {
650 idx = wdev->wext.default_mgmt_key;
651 if (idx < 0)
652 return -EINVAL;
653 } else
654 idx--;
655 } else {
656 if (idx < 1 || idx > 4) {
657 idx = wdev->wext.default_key;
658 if (idx < 0)
659 return -EINVAL;
660 } else
661 idx--;
662 }
663
664 addr = ext->addr.sa_data;
665 if (is_broadcast_ether_addr(addr))
666 addr = NULL;
667
668 memset(&params, 0, sizeof(params));
669 params.key = ext->key;
670 params.key_len = ext->key_len;
671 params.cipher = cipher;
672
673 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
674 params.seq = ext->rx_seq;
675 params.seq_len = 6;
676 }
677
678 return cfg80211_set_encryption(
679 rdev, dev, addr, remove,
680 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
681 idx, &params);
682}
683EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
684
685struct giwencode_cookie {
686 size_t buflen;
687 char *keybuf;
688};
689
690static void giwencode_get_key_cb(void *cookie, struct key_params *params)
691{
692 struct giwencode_cookie *data = cookie;
693
694 if (!params->key) {
695 data->buflen = 0;
696 return;
697 }
698
699 data->buflen = min_t(size_t, data->buflen, params->key_len);
700 memcpy(data->keybuf, params->key, data->buflen);
701}
702
703int cfg80211_wext_giwencode(struct net_device *dev,
704 struct iw_request_info *info,
705 struct iw_point *erq, char *keybuf)
706{
707 struct wireless_dev *wdev = dev->ieee80211_ptr;
708 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
709 int idx, err;
710 struct giwencode_cookie data = {
711 .keybuf = keybuf,
712 .buflen = erq->length,
713 };
714
715 if (!rdev->ops->get_key)
716 return -EOPNOTSUPP;
717
718 idx = erq->flags & IW_ENCODE_INDEX;
719 if (idx == 0) {
720 idx = wdev->wext.default_key;
721 if (idx < 0)
722 idx = 0;
723 } else if (idx < 1 || idx > 4)
724 return -EINVAL;
725 else
726 idx--;
727
728 erq->flags = idx + 1;
729
730 err = rdev->ops->get_key(&rdev->wiphy, dev, idx, NULL, &data,
731 giwencode_get_key_cb);
732 if (!err) {
733 erq->length = data.buflen;
734 erq->flags |= IW_ENCODE_ENABLED;
735 return 0;
736 }
737
738 if (err == -ENOENT) {
739 erq->flags |= IW_ENCODE_DISABLED;
740 erq->length = 0;
741 return 0;
742 }
743
744 return err;
745}
746EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
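
cfg80211_wext_giwencode() fetches the key through the driver's get_key operation, which hands the key material to a callback rather than filling a return buffer; the cookie structure carries the caller's buffer into that callback. A generic user-space sketch of the same callback-plus-cookie pattern, not part of this patch (the get_key() call in the closing comment is illustrative only):

#include <stddef.h>
#include <string.h>

struct key_copy_cookie {
	size_t buflen;	/* in: buffer size, out: bytes copied */
	char  *keybuf;
};

/* Callback handed to a get_key-style API together with the cookie. */
static void copy_key_cb(void *cookie, const void *key, size_t key_len)
{
	struct key_copy_cookie *c = cookie;

	if (!key) {
		c->buflen = 0;
		return;
	}
	if (key_len < c->buflen)
		c->buflen = key_len;
	memcpy(c->keybuf, key, c->buflen);
}

/* A caller would set up
 *	struct key_copy_cookie c = { .buflen = sizeof(buf), .keybuf = buf };
 *	get_key(idx, &c, copy_key_cb);
 * and read c.buflen afterwards to see how much key material was copied. */
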
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 0e59f9ae9b81..252c2010c2e2 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -636,8 +636,10 @@ static void wireless_seq_printf_stats(struct seq_file *seq,
 /*
  * Print info for /proc/net/wireless (print all entries)
  */
-static int wireless_seq_show(struct seq_file *seq, void *v)
+static int wireless_dev_seq_show(struct seq_file *seq, void *v)
 {
+	might_sleep();
+
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq, "Inter-| sta-| Quality | Discarded "
 			   "packets | Missed | WE\n"
@@ -649,14 +651,46 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
654static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos)
655{
656 struct net *net = seq_file_net(seq);
657 loff_t off;
658 struct net_device *dev;
659
660 rtnl_lock();
661 if (!*pos)
662 return SEQ_START_TOKEN;
663
664 off = 1;
665 for_each_netdev(net, dev)
666 if (off++ == *pos)
667 return dev;
668 return NULL;
669}
670
671static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
672{
673 struct net *net = seq_file_net(seq);
674
675 ++*pos;
676
677 return v == SEQ_START_TOKEN ?
678 first_net_device(net) : next_net_device(v);
679}
680
681static void wireless_dev_seq_stop(struct seq_file *seq, void *v)
682{
683 rtnl_unlock();
684}
685
 static const struct seq_operations wireless_seq_ops = {
-	.start = dev_seq_start,
-	.next = dev_seq_next,
-	.stop = dev_seq_stop,
-	.show = wireless_seq_show,
+	.start = wireless_dev_seq_start,
+	.next = wireless_dev_seq_next,
+	.stop = wireless_dev_seq_stop,
+	.show = wireless_dev_seq_show,
 };
 
-static int wireless_seq_open(struct inode *inode, struct file *file)
+static int seq_open_wireless(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &wireless_seq_ops,
 			    sizeof(struct seq_net_private));
@@ -664,7 +698,7 @@ static int wireless_seq_open(struct inode *inode, struct file *file)
 
 static const struct file_operations wireless_seq_fops = {
 	.owner = THIS_MODULE,
-	.open = wireless_seq_open,
+	.open = seq_open_wireless,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release_net,