Diffstat (limited to 'net')
 net/802/fddi.c              |    4
 net/802/hippi.c             |    5
 net/8021q/vlan_dev.c        |    1
 net/appletalk/dev.c         |   11
 net/atm/br2684.c            |   26
 net/atm/clip.c              |   27
 net/bridge/br_fdb.c         |    2
 net/core/dev.c              |  133
 net/core/gen_estimator.c    |    4
 net/core/net-sysfs.c        |    1
 net/core/netpoll.c          |    5
 net/core/pktgen.c           |    3
 net/core/skbuff.c           |   70
 net/core/sock.c             |  103
 net/decnet/af_decnet.c      |   19
 net/decnet/dn_nsp_out.c     |    8
 net/econet/af_econet.c      |   18
 net/ethernet/eth.c          |    5
 net/ipv4/Kconfig            |    2
 net/ipv4/af_inet.c          |   21
 net/ipv4/fib_trie.c         |    6
 net/ipv4/route.c            |   60
 net/ipv4/tcp.c              |   47
 net/ipv4/tcp_input.c        |  118
 net/ipv6/af_inet6.c         |   13
 net/ipv6/route.c            |    3
 net/ipv6/tcp_ipv6.c         |    7
 net/irda/irlap_frame.c      |   18
 net/llc/llc_conn.c          |    4
 net/mac80211/debugfs.c      |   16
 net/mac80211/ibss.c         |   33
 net/mac80211/ieee80211_i.h  |   43
 net/mac80211/key.c          |    6
 net/mac80211/main.c         |   24
 net/mac80211/mesh.c         |   40
 net/mac80211/mesh.h         |   16
 net/mac80211/mesh_hwmp.c    |    8
 net/mac80211/mesh_plink.c   |   21
 net/mac80211/mlme.c         |  227
 net/mac80211/pm.c           |   80
 net/mac80211/rx.c           |   94
 net/mac80211/scan.c         |   18
 net/mac80211/spectmgmt.c    |  101
 net/mac80211/sta_info.c     |    6
 net/mac80211/sta_info.h     |    2
 net/mac80211/tx.c           |    2
 net/mac80211/util.c         |  116
 net/mac80211/wext.c         |    5
 net/mac80211/wme.c          |   30
 net/rxrpc/ar-connection.c   |   12
 net/sched/sch_teql.c        |    1
 net/wireless/Kconfig        |    8
 net/wireless/Makefile       |    1
 net/wireless/core.c         |    7
 net/wireless/core.h         |   14
 net/wireless/debugfs.c      |  131
 net/wireless/debugfs.h      |   14
 net/wireless/nl80211.c      |   68
 net/wireless/reg.c          |  223
 net/wireless/util.c         |  320
 net/wireless/wext-compat.c  |    7
 net/wireless/wext.c         |   39
 62 files changed, 1607 insertions(+), 870 deletions(-)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 539e6064e6d4..3ef0ab0a543a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -185,10 +185,6 @@ static const struct header_ops fddi_header_ops = {
 static void fddi_setup(struct net_device *dev)
 {
 	dev->header_ops = &fddi_header_ops;
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu = fddi_change_mtu,
-#endif
-
 	dev->type = ARPHRD_FDDI;
 	dev->hard_header_len = FDDI_K_SNAP_HLEN+3;	/* Assume 802.2 SNAP hdr len + 3 pad bytes */
 	dev->mtu = FDDI_K_SNAP_DLEN;	/* Assume max payload of 802.2 SNAP frame */
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 313b9ebf92ee..cd3e8e929529 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -193,11 +193,6 @@ static const struct header_ops hippi_header_ops = {
 
 static void hippi_setup(struct net_device *dev)
 {
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu = hippi_change_mtu;
-	dev->set_mac_address = hippi_mac_addr;
-	dev->neigh_setup = hippi_neigh_setup_dev;
-#endif
 	dev->header_ops = &hippi_header_ops;
 
 	/*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ff7572ac5481..1e2ad4c7c59b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -644,7 +644,6 @@ static int vlan_dev_init(struct net_device *dev)
 		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
 		dev->netdev_ops = &vlan_netdev_ops;
 	}
-	netdev_resync_ops(dev);
 
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index 72277d70c980..6c8016f61866 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -9,21 +9,10 @@
 #include <linux/if_arp.h>
 #include <linux/if_ltalk.h>
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-static int ltalk_change_mtu(struct net_device *dev, int mtu)
-{
-	return -EINVAL;
-}
-#endif
-
 static void ltalk_setup(struct net_device *dev)
 {
 	/* Fill in the fields of the device structure with localtalk-generic values. */
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	dev->change_mtu = ltalk_change_mtu;
-#endif
-
 	dev->type = ARPHRD_LOCALTLK;
 	dev->hard_header_len = LTALK_HLEN;
 	dev->mtu = LTALK_MTU;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 3100a8940afc..bfa8fa9894fc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -445,9 +445,10 @@ free_skb:
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
+	struct sk_buff_head queue;
 	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *tmp;
 	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
@@ -505,29 +506,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	barrier();
 	atmvcc->push = br2684_push;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(atmvcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	skb_queue_walk_safe(&queue, skb, tmp) {
+		struct net_device *dev = skb->dev;
 
-		skb->next = skb->prev = NULL;
-		br2684_push(atmvcc, skb);
-		skb->dev->stats.rx_bytes -= skb->len;
-		skb->dev->stats.rx_packets--;
+		dev->stats.rx_bytes -= skb->len;
+		dev->stats.rx_packets--;
 
-		skb = next;
+		br2684_push(atmvcc, skb);
 	}
 	__module_get(THIS_MODULE);
 	return 0;
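
This br2684 conversion (and the clip.c one that follows) replaces open-coded sk_buff list surgery with the generic queue helpers: splice the socket receive queue onto a private list under its lock, then walk that list with the _safe iterator. A minimal sketch of the same pattern under stated assumptions — drain_pending() and handle_skb() are made-up names, not kernel APIs:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative only: detach everything queued on a socket and process it
 * outside the queue lock. The _safe walk tolerates handle_skb() freeing
 * or re-queueing the current skb.
 */
static void drain_pending(struct sock *sk, void (*handle_skb)(struct sk_buff *))
{
	struct sk_buff_head queue;
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	__skb_queue_head_init(&queue);		/* private, lockless list head */

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	skb_queue_splice_init(&sk->sk_receive_queue, &queue);
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);

	skb_queue_walk_safe(&queue, skb, tmp)
		handle_skb(skb);
}

Compared with the removed code, nothing touches skb->next/skb->prev directly and the queue's qlen stays consistent, which is the point of the helpers.
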
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 3dc0a3a42a57..5597b87b9e64 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -445,9 +445,9 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
+	struct sk_buff_head *rq, queue;
 	struct clip_vcc *clip_vcc;
-	struct sk_buff *skb;
-	struct sk_buff_head *rq;
+	struct sk_buff *skb, *tmp;
 	unsigned long flags;
 
 	if (!vcc->push)
@@ -469,39 +469,28 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(vcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* re-process everything received between connection setup and MKIP */
-	while (skb) {
-		struct sk_buff *next = skb->next;
-
-		skb->next = skb->prev = NULL;
+	skb_queue_walk_safe(&queue, skb, tmp) {
 		if (!clip_devs) {
 			atm_return(vcc, skb->truesize);
 			kfree_skb(skb);
 		} else {
+			struct net_device *dev = skb->dev;
 			unsigned int len = skb->len;
 
 			skb_get(skb);
 			clip_push(vcc, skb);
-			skb->dev->stats.rx_packets--;
-			skb->dev->stats.rx_bytes -= len;
+			dev->stats.rx_packets--;
+			dev->stats.rx_bytes -= len;
 			kfree_skb(skb);
 		}
-
-		skb = next;
 	}
 	return 0;
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a48f5efdb6bf..cb3e97b93aeb 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -398,7 +398,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (unlikely(fdb->is_local)) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "%s: received packet with "
-			       " own address as source address\n",
+			       "own address as source address\n",
 			       source->dev->name);
 	} else {
 		/* fastpath: update of existing entry */
diff --git a/net/core/dev.c b/net/core/dev.c
index 3942266d1f6c..32ceee17896e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1698,6 +1698,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			skb->dst = NULL;
 		}
 		rc = ops->ndo_start_xmit(skb, dev);
+		if (rc == 0)
+			txq_trans_update(txq);
 		/*
 		 * TODO: if skb_orphan() was called by
 		 * dev->hard_start_xmit() (for example, the unmodified
@@ -1727,6 +1729,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
+		txq_trans_update(txq);
 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -2387,21 +2390,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-	unsigned int offset = skb_gro_offset(skb);
-
-	hlen += offset;
-	if (unlikely(skb_headlen(skb) ||
-		     skb_shinfo(skb)->frags[0].size < hlen ||
-		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
-		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-	return page_address(skb_shinfo(skb)->frags[0].page) +
-	       skb_shinfo(skb)->frags[0].page_offset + offset;
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -2464,10 +2452,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	ret = GRO_HELD;
 
 pull:
-	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-		if (napi->gro_list == skb)
-			napi->gro_list = skb->next;
-		ret = GRO_DROP;
+	if (skb_headlen(skb) < skb_gro_offset(skb)) {
+		int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+		BUG_ON(skb->end - skb->tail < grow);
+
+		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+		skb->tail += grow;
+		skb->data_len -= grow;
+
+		skb_shinfo(skb)->frags[0].page_offset += grow;
+		skb_shinfo(skb)->frags[0].size -= grow;
+
+		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+			put_page(skb_shinfo(skb)->frags[0].page);
+			memmove(skb_shinfo(skb)->frags,
+				skb_shinfo(skb)->frags + 1,
+				--skb_shinfo(skb)->nr_frags);
+		}
 	}
 
 ok:
@@ -2517,6 +2520,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb->mac_header == skb->tail &&
+	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+		NAPI_GRO_CB(skb)->frag0 =
+			page_address(skb_shinfo(skb)->frags[0].page) +
+			skb_shinfo(skb)->frags[0].page_offset;
+		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+	}
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
@@ -2586,17 +2605,24 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi->skb;
 	struct ethhdr *eth;
+	unsigned int hlen;
+	unsigned int off;
 
 	napi->skb = NULL;
 
 	skb_reset_mac_header(skb);
 	skb_gro_reset_offset(skb);
 
-	eth = skb_gro_header(skb, sizeof(*eth));
-	if (!eth) {
-		napi_reuse_skb(napi, skb);
-		skb = NULL;
-		goto out;
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*eth);
+	eth = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eth = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			skb = NULL;
+			goto out;
+		}
 	}
 
 	skb_gro_pull(skb, sizeof(*eth));
@@ -4580,39 +4606,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar. If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	dev->init = ops->ndo_init;
-	dev->uninit = ops->ndo_uninit;
-	dev->open = ops->ndo_open;
-	dev->change_rx_flags = ops->ndo_change_rx_flags;
-	dev->set_rx_mode = ops->ndo_set_rx_mode;
-	dev->set_multicast_list = ops->ndo_set_multicast_list;
-	dev->set_mac_address = ops->ndo_set_mac_address;
-	dev->validate_addr = ops->ndo_validate_addr;
-	dev->do_ioctl = ops->ndo_do_ioctl;
-	dev->set_config = ops->ndo_set_config;
-	dev->change_mtu = ops->ndo_change_mtu;
-	dev->neigh_setup = ops->ndo_neigh_setup;
-	dev->tx_timeout = ops->ndo_tx_timeout;
-	dev->get_stats = ops->ndo_get_stats;
-	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
  *	register_netdevice - register a network device
  *	@dev: device to register
@@ -4652,23 +4645,6 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatibility support.
-	 * This is temporary until all network devices are converted.
-	 */
-	if (dev->netdev_ops) {
-		netdev_resync_ops(dev);
-	} else {
-		char drivername[64];
-		pr_info("%s (%s): not using net_device_ops yet\n",
-			dev->name, netdev_drivername(dev, drivername, 64));
-
-		/* This works only because net_device_ops and the
-		   compatibility structure are the same. */
-		dev->netdev_ops = (void *) &(dev->init);
-	}
-#endif
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -5012,18 +4988,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	struct netdev_queue *tx;
 	struct net_device *dev;
 	size_t alloc_size;
-	void *p;
+	struct net_device *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
-		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
 		alloc_size += sizeof_priv;
 	}
 	/* ensure 32-byte alignment of whole construct */
-	alloc_size += NETDEV_ALIGN_CONST;
+	alloc_size += NETDEV_ALIGN - 1;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
@@ -5038,8 +5014,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		goto free_p;
 	}
 
-	dev = (struct net_device *)
-		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
 
 	if (dev_addr_init(dev))
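
The dev.c hunks above, together with the skbuff.c ones below, introduce the GRO "frag0" fast path: skb_gro_reset_offset() caches a pointer into the first page fragment, skb_gro_header_fast() reads headers straight from it, and skb_gro_header_slow() is the fallback that pulls the header into the linear area. The inet_gro_receive() hunk later in this diff uses the same idiom; here is a hedged sketch of the calling pattern for a protocol's gro_receive hook, with my_hdr and my_gro_receive() as placeholders rather than real kernel symbols:

#include <linux/netdevice.h>

struct my_hdr {			/* placeholder header, not a real protocol */
	__be16 word0;
	__be16 word1;
};

static struct sk_buff **my_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct my_hdr);
	struct my_hdr *hdr;

	/* fast path: header bytes may be available directly in frag0 */
	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		/* slow path: not enough bytes cached, pull into skb->data */
		hdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!hdr)) {
			/* simplified: real hooks also set NAPI_GRO_CB(skb)->flush */
			return NULL;
		}
	}

	/* ... compare hdr against the packets already held on *head ... */
	return NULL;
}
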
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6d62d4618cfc..78e5bfc454ae 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -128,12 +128,12 @@ static void est_timer(unsigned long arg)
 		npackets = e->bstats->packets;
 		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
+		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
 		e->last_packets = npackets;
-		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
+		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
 		e->rate_est->pps = (e->avpps+0x1FF)>>10;
 skip:
 		read_unlock(&est_lock);
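
The estimator change above swaps avg += (value - avg) >> ewma_log for avg += (value >> ewma_log) - (avg >> ewma_log). Both are forms of the same exponentially weighted moving average; the rewritten form never materialises the full difference, which I take to be the motivation (avoiding the signed wide intermediate), though the rounding behaviour differs slightly. A tiny user-space illustration of the new form, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t av = 0;			/* smoothed estimate, like e->avbps */
	const unsigned int ewma_log = 5;	/* smoothing shift, like e->ewma_log */
	const uint32_t rate = 200000;		/* pretend per-interval measurement */
	int i;

	for (i = 0; i < 200; i++)
		av += (rate >> ewma_log) - (av >> ewma_log);

	printf("input rate %u, smoothed estimate %u\n", rate, av);
	return 0;
}
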
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b9641e816eee..3994680c08b9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -499,7 +499,6 @@ int netdev_register_kobject(struct net_device *net)
 	dev->platform_data = net;
 	dev->groups = groups;
 
-	BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
 	dev_set_name(dev, "%s", net->name);
 
 #ifdef CONFIG_SYSFS
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 67b4f3e3d4a5..7ab31a7576a1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -302,8 +302,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
-				if (!netif_tx_queue_stopped(txq))
+				if (!netif_tx_queue_stopped(txq)) {
 					status = ops->ndo_start_xmit(skb, dev);
+					if (status == NETDEV_TX_OK)
+						txq_trans_update(txq);
+				}
 				__netif_tx_unlock(txq);
 
 				if (status == NETDEV_TX_OK)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11..b8ccd3c88d63 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
 		int i = 0;
-		for (; i < pkt_dev->nflows; i++){
+		for (; i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
@@ -3438,6 +3438,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	retry_now:
 		ret = (*xmit)(pkt_dev->skb, odev);
 		if (likely(ret == NETDEV_TX_OK)) {
+			txq_trans_update(txq);
 			pkt_dev->last_ok = 1;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
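
Three hunks in this series — dev_hard_start_xmit(), netpoll_send_skb() and pktgen_xmit() — add txq_trans_update(txq) right after a successful ndo_start_xmit(). The helper refreshes the queue's trans_start timestamp, which the transmit watchdog consults to decide whether a queue has stalled; paths that call the driver directly, bypassing the normal xmit path, now have to refresh it themselves. A sketch of the pattern, assuming the caller already holds the tx queue lock (my_xmit_one() is a hypothetical wrapper, not a kernel function):

#include <linux/netdevice.h>

static int my_xmit_one(struct net_device *dev, struct netdev_queue *txq,
		       struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	if (netif_tx_queue_stopped(txq))
		return NETDEV_TX_BUSY;

	rc = ops->ndo_start_xmit(skb, dev);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);	/* keep the watchdog from flagging a stall */

	return rc;
}
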
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394b2611..8e815e685f28 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -526,8 +526,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->sp = secpath_get(old->sp);
 #endif
 	memcpy(new->cb, old->cb, sizeof(old->cb));
-	new->csum_start = old->csum_start;
-	new->csum_offset = old->csum_offset;
+	new->csum = old->csum;
 	new->local_df = old->local_df;
 	new->pkt_type = old->pkt_type;
 	new->ip_summed = old->ip_summed;
@@ -538,6 +537,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 	new->protocol = old->protocol;
 	new->mark = old->mark;
+	new->iif = old->iif;
 	__nf_copy(new, old);
 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -550,10 +550,18 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 #endif
 	new->vlan_tci = old->vlan_tci;
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+	new->do_not_encrypt = old->do_not_encrypt;
+	new->requeue = old->requeue;
+#endif
 
 	skb_copy_secmark(new, old);
 }
 
+/*
+ * You should not add any new code to this function. Add it to
+ * __copy_skb_header above instead.
+ */
 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 {
 #define C(x) n->x = skb->x
@@ -569,16 +577,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->cloned = 1;
 	n->nohdr = 0;
 	n->destructor = NULL;
-	C(iif);
 	C(tail);
 	C(end);
 	C(head);
 	C(data);
 	C(truesize);
-#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
-	C(do_not_encrypt);
-	C(requeue);
-#endif
 	atomic_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -2288,7 +2291,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
@@ -2661,30 +2664,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
 	struct sk_buff *p = *head;
 	struct sk_buff *nskb;
+	struct skb_shared_info *skbinfo = skb_shinfo(skb);
+	struct skb_shared_info *pinfo = skb_shinfo(p);
 	unsigned int headroom;
 	unsigned int len = skb_gro_len(skb);
+	unsigned int offset = skb_gro_offset(skb);
+	unsigned int headlen = skb_headlen(skb);
 
 	if (p->len + len >= 65536)
 		return -E2BIG;
 
-	if (skb_shinfo(p)->frag_list)
+	if (pinfo->frag_list)
 		goto merge;
-	else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
-		if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
-		    MAX_SKB_FRAGS)
+	else if (headlen <= offset) {
+		skb_frag_t *frag;
+		skb_frag_t *frag2;
+		int i = skbinfo->nr_frags;
+		int nr_frags = pinfo->nr_frags + i;
+
+		offset -= headlen;
+
+		if (nr_frags > MAX_SKB_FRAGS)
 			return -E2BIG;
 
-		skb_shinfo(skb)->frags[0].page_offset +=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_shinfo(skb)->frags[0].size -=
-			skb_gro_offset(skb) - skb_headlen(skb);
+		pinfo->nr_frags = nr_frags;
+		skbinfo->nr_frags = 0;
 
-		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
-		       skb_shinfo(skb)->frags,
-		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+		frag = pinfo->frags + nr_frags;
+		frag2 = skbinfo->frags + i;
+		do {
+			*--frag = *--frag2;
+		} while (--i);
 
-		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
-		skb_shinfo(skb)->nr_frags = 0;
+		frag->page_offset += offset;
+		frag->size -= offset;
 
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
@@ -2715,7 +2728,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
 	skb_shinfo(nskb)->frag_list = p;
-	skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
 	skb_header_release(p);
 	nskb->prev = p;
 
@@ -2730,16 +2743,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	p = nskb;
 
 merge:
-	if (skb_gro_offset(skb) > skb_headlen(skb)) {
-		skb_shinfo(skb)->frags[0].page_offset +=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_shinfo(skb)->frags[0].size -=
-			skb_gro_offset(skb) - skb_headlen(skb);
-		skb_gro_reset_offset(skb);
-		skb_gro_pull(skb, skb_headlen(skb));
+	if (offset > headlen) {
+		skbinfo->frags[0].page_offset += offset - headlen;
+		skbinfo->frags[0].size -= offset - headlen;
+		offset = headlen;
 	}
 
-	__skb_pull(skb, skb_gro_offset(skb));
+	__skb_pull(skb, offset);
 
 	p->prev->next = skb;
 	p->prev = skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dbf3ffb35cc..58dec9dff99a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -212,6 +212,7 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 
 /* Maximal space eaten by iovec or ancilliary data plus some space */
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+EXPORT_SYMBOL(sysctl_optmem_max);
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
@@ -444,7 +445,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 int sock_setsockopt(struct socket *sock, int level, int optname,
 		    char __user *optval, int optlen)
 {
-	struct sock *sk=sock->sk;
+	struct sock *sk = sock->sk;
 	int val;
 	int valbool;
 	struct linger ling;
@@ -463,15 +464,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 	if (get_user(val, (int __user *)optval))
 		return -EFAULT;
 
-	valbool = val?1:0;
+	valbool = val ? 1 : 0;
 
 	lock_sock(sk);
 
-	switch(optname) {
+	switch (optname) {
 	case SO_DEBUG:
-		if (val && !capable(CAP_NET_ADMIN)) {
+		if (val && !capable(CAP_NET_ADMIN))
 			ret = -EACCES;
-		} else
+		else
 			sock_valbool_flag(sk, SOCK_DBG, valbool);
 		break;
 	case SO_REUSEADDR:
@@ -582,7 +583,7 @@ set_rcvbuf:
 			ret = -EINVAL;	/* 1003.1g */
 			break;
 		}
-		if (copy_from_user(&ling,optval,sizeof(ling))) {
+		if (copy_from_user(&ling, optval, sizeof(ling))) {
 			ret = -EFAULT;
 			break;
 		}
@@ -690,9 +691,8 @@ set_rcvbuf:
 	case SO_MARK:
 		if (!capable(CAP_NET_ADMIN))
 			ret = -EPERM;
-		else {
+		else
 			sk->sk_mark = val;
-		}
 		break;
 
 		/* We implement the SO_SNDLOWAT etc to
@@ -704,6 +704,7 @@ set_rcvbuf:
 	release_sock(sk);
 	return ret;
 }
+EXPORT_SYMBOL(sock_setsockopt);
 
 
 int sock_getsockopt(struct socket *sock, int level, int optname,
@@ -727,7 +728,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
 	memset(&v, 0, sizeof(v));
 
-	switch(optname) {
+	switch (optname) {
 	case SO_DEBUG:
 		v.val = sock_flag(sk, SOCK_DBG);
 		break;
@@ -762,7 +763,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
 	case SO_ERROR:
 		v.val = -sock_error(sk);
-		if (v.val==0)
+		if (v.val == 0)
 			v.val = xchg(&sk->sk_err_soft, 0);
 		break;
 
@@ -816,7 +817,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_RCVTIMEO:
-		lv=sizeof(struct timeval);
+		lv = sizeof(struct timeval);
 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
 			v.tm.tv_sec = 0;
 			v.tm.tv_usec = 0;
@@ -827,7 +828,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_SNDTIMEO:
-		lv=sizeof(struct timeval);
+		lv = sizeof(struct timeval);
 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
 			v.tm.tv_sec = 0;
 			v.tm.tv_usec = 0;
@@ -842,7 +843,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_SNDLOWAT:
-		v.val=1;
+		v.val = 1;
 		break;
 
 	case SO_PASSCRED:
@@ -1002,6 +1003,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 
 	return sk;
 }
+EXPORT_SYMBOL(sk_alloc);
 
 void sk_free(struct sock *sk)
 {
@@ -1026,6 +1028,7 @@ void sk_free(struct sock *sk)
 	put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
+EXPORT_SYMBOL(sk_free);
 
 /*
  * Last sock_put should drop referrence to sk->sk_net. It has already
@@ -1126,7 +1129,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 out:
 	return newsk;
 }
-
 EXPORT_SYMBOL_GPL(sk_clone);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
@@ -1177,6 +1179,7 @@ void sock_wfree(struct sk_buff *skb)
 		sk->sk_write_space(sk);
 	sock_put(sk);
 }
+EXPORT_SYMBOL(sock_wfree);
 
 /*
  * Read buffer destructor automatically called from kfree_skb.
@@ -1188,6 +1191,7 @@ void sock_rfree(struct sk_buff *skb)
 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 	sk_mem_uncharge(skb->sk, skb->truesize);
 }
+EXPORT_SYMBOL(sock_rfree);
 
 
 int sock_i_uid(struct sock *sk)
@@ -1199,6 +1203,7 @@ int sock_i_uid(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 	return uid;
 }
+EXPORT_SYMBOL(sock_i_uid);
 
 unsigned long sock_i_ino(struct sock *sk)
 {
@@ -1209,6 +1214,7 @@ unsigned long sock_i_ino(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 	return ino;
 }
+EXPORT_SYMBOL(sock_i_ino);
 
 /*
  * Allocate a skb from the socket's send buffer.
@@ -1217,7 +1223,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
-		struct sk_buff * skb = alloc_skb(size, priority);
+		struct sk_buff *skb = alloc_skb(size, priority);
 		if (skb) {
 			skb_set_owner_w(skb, sk);
 			return skb;
@@ -1225,6 +1231,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(sock_wmalloc);
 
 /*
  * Allocate a skb from the socket's receive buffer.
@@ -1261,6 +1268,7 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(sock_kmalloc);
 
 /*
  * Free an option memory block.
@@ -1270,11 +1278,12 @@ void sock_kfree_s(struct sock *sk, void *mem, int size)
 	kfree(mem);
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
+EXPORT_SYMBOL(sock_kfree_s);
 
 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
    I think, these locks should be removed for datagram sockets.
  */
-static long sock_wait_for_wmem(struct sock * sk, long timeo)
+static long sock_wait_for_wmem(struct sock *sk, long timeo)
 {
 	DEFINE_WAIT(wait);
 
@@ -1392,6 +1401,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 {
 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
 }
+EXPORT_SYMBOL(sock_alloc_send_skb);
 
 static void __lock_sock(struct sock *sk)
 {
@@ -1460,7 +1470,6 @@ int sk_wait_data(struct sock *sk, long *timeo)
 	finish_wait(sk->sk_sleep, &wait);
 	return rc;
 }
-
 EXPORT_SYMBOL(sk_wait_data);
 
 /**
@@ -1541,7 +1550,6 @@ suppress_allocation:
 	atomic_sub(amt, prot->memory_allocated);
 	return 0;
 }
-
 EXPORT_SYMBOL(__sk_mem_schedule);
 
 /**
@@ -1560,7 +1568,6 @@ void __sk_mem_reclaim(struct sock *sk)
 	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
-
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
 
@@ -1575,78 +1582,92 @@ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_bind);
 
 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
 		    int len, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_connect);
 
 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_socketpair);
 
 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_accept);
 
 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
 		    int *len, int peer)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_getname);
 
-unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
+unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
 {
 	return 0;
 }
+EXPORT_SYMBOL(sock_no_poll);
 
 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_ioctl);
 
 int sock_no_listen(struct socket *sock, int backlog)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_listen);
 
 int sock_no_shutdown(struct socket *sock, int how)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_shutdown);
 
 int sock_no_setsockopt(struct socket *sock, int level, int optname,
 		    char __user *optval, int optlen)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_setsockopt);
 
 int sock_no_getsockopt(struct socket *sock, int level, int optname,
 		    char __user *optval, int __user *optlen)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_getsockopt);
 
 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 		    size_t len)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_sendmsg);
 
 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 		    size_t len, int flags)
 {
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL(sock_no_recvmsg);
 
 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
 {
 	/* Mirror missing mmap method error code */
 	return -ENODEV;
 }
+EXPORT_SYMBOL(sock_no_mmap);
 
 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
 {
@@ -1660,6 +1681,7 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz
 	kunmap(page);
 	return res;
 }
+EXPORT_SYMBOL(sock_no_sendpage);
 
 /*
  *	Default Socket Callbacks
@@ -1723,6 +1745,7 @@ void sk_send_sigurg(struct sock *sk)
 		if (send_sigurg(&sk->sk_socket->file->f_owner))
 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
 }
+EXPORT_SYMBOL(sk_send_sigurg);
 
 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 		    unsigned long expires)
@@ -1730,7 +1753,6 @@ void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 	if (!mod_timer(timer, expires))
 		sock_hold(sk);
 }
-
 EXPORT_SYMBOL(sk_reset_timer);
 
 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
@@ -1738,7 +1760,6 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
 	if (timer_pending(timer) && del_timer(timer))
 		__sock_put(sk);
 }
-
 EXPORT_SYMBOL(sk_stop_timer);
 
 void sock_init_data(struct socket *sock, struct sock *sk)
@@ -1797,6 +1818,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
+EXPORT_SYMBOL(sock_init_data);
 
 void lock_sock_nested(struct sock *sk, int subclass)
 {
@@ -1812,7 +1834,6 @@ void lock_sock_nested(struct sock *sk, int subclass)
 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
 	local_bh_enable();
 }
-
 EXPORT_SYMBOL(lock_sock_nested);
 
 void release_sock(struct sock *sk)
@@ -1895,7 +1916,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 
 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(sock_common_getsockopt);
 
 #ifdef CONFIG_COMPAT
@@ -1925,7 +1945,6 @@ int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
 		msg->msg_namelen = addr_len;
 	return err;
 }
-
 EXPORT_SYMBOL(sock_common_recvmsg);
 
 /*
@@ -1938,7 +1957,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
 
 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
 }
-
 EXPORT_SYMBOL(sock_common_setsockopt);
 
 #ifdef CONFIG_COMPAT
@@ -1989,7 +2007,6 @@ void sk_common_release(struct sock *sk)
 	sk_refcnt_debug_release(sk);
 	sock_put(sk);
 }
-
 EXPORT_SYMBOL(sk_common_release);
 
 static DEFINE_RWLOCK(proto_list_lock);
@@ -2171,7 +2188,6 @@ out_free_sock_slab:
 out:
 	return -ENOBUFS;
 }
-
 EXPORT_SYMBOL(proto_register);
 
 void proto_unregister(struct proto *prot)
@@ -2198,7 +2214,6 @@ void proto_unregister(struct proto *prot)
 		prot->twsk_prot->twsk_slab = NULL;
 	}
 }
-
 EXPORT_SYMBOL(proto_unregister);
 
 #ifdef CONFIG_PROC_FS
@@ -2324,33 +2339,3 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
-
-EXPORT_SYMBOL(sk_alloc);
-EXPORT_SYMBOL(sk_free);
-EXPORT_SYMBOL(sk_send_sigurg);
-EXPORT_SYMBOL(sock_alloc_send_skb);
-EXPORT_SYMBOL(sock_init_data);
-EXPORT_SYMBOL(sock_kfree_s);
-EXPORT_SYMBOL(sock_kmalloc);
-EXPORT_SYMBOL(sock_no_accept);
-EXPORT_SYMBOL(sock_no_bind);
-EXPORT_SYMBOL(sock_no_connect);
-EXPORT_SYMBOL(sock_no_getname);
-EXPORT_SYMBOL(sock_no_getsockopt);
-EXPORT_SYMBOL(sock_no_ioctl);
-EXPORT_SYMBOL(sock_no_listen);
-EXPORT_SYMBOL(sock_no_mmap);
-EXPORT_SYMBOL(sock_no_poll);
-EXPORT_SYMBOL(sock_no_recvmsg);
-EXPORT_SYMBOL(sock_no_sendmsg);
-EXPORT_SYMBOL(sock_no_sendpage);
-EXPORT_SYMBOL(sock_no_setsockopt);
-EXPORT_SYMBOL(sock_no_shutdown);
-EXPORT_SYMBOL(sock_no_socketpair);
-EXPORT_SYMBOL(sock_rfree);
-EXPORT_SYMBOL(sock_setsockopt);
-EXPORT_SYMBOL(sock_wfree);
-EXPORT_SYMBOL(sock_wmalloc);
-EXPORT_SYMBOL(sock_i_uid);
-EXPORT_SYMBOL(sock_i_ino);
-EXPORT_SYMBOL(sysctl_optmem_max);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9647d911f916..bccb3887773e 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1250,14 +1250,8 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		if (skb) {
 			amount = skb->len;
 		} else {
-			skb = sk->sk_receive_queue.next;
-			for (;;) {
-				if (skb ==
-				    (struct sk_buff *)&sk->sk_receive_queue)
-					break;
+			skb_queue_walk(&sk->sk_receive_queue, skb)
 				amount += skb->len;
-				skb = skb->next;
-			}
 		}
 		release_sock(sk);
 		err = put_user(amount, (int __user *)arg);
@@ -1644,13 +1638,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 
 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
 {
-	struct sk_buff *skb = q->next;
+	struct sk_buff *skb;
 	int len = 0;
 
 	if (flags & MSG_OOB)
 		return !skb_queue_empty(q) ? 1 : 0;
 
-	while(skb != (struct sk_buff *)q) {
+	skb_queue_walk(q, skb) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
 		len += skb->len;
 
@@ -1666,8 +1660,6 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 		/* minimum data length for read exceeded */
 		if (len >= target)
 			return 1;
-
-		skb = skb->next;
 	}
 
 	return 0;
@@ -1683,7 +1675,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 	size_t target = size > 1 ? 1 : 0;
 	size_t copied = 0;
 	int rv = 0;
-	struct sk_buff *skb, *nskb;
+	struct sk_buff *skb, *n;
 	struct dn_skb_cb *cb = NULL;
 	unsigned char eor = 0;
 	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1758,7 +1750,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		finish_wait(sk->sk_sleep, &wait);
 	}
 
-	for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
+	skb_queue_walk_safe(queue, skb, n) {
 		unsigned int chunk = skb->len;
 		cb = DN_SKB_CB(skb);
 
@@ -1775,7 +1767,6 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 			skb_pull(skb, chunk);
 
 		eor = cb->nsp_flags & 0x40;
-		nskb = skb->next;
 
 		if (skb->len == 0) {
 			skb_unlink(skb, queue);
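
This af_decnet conversion, and the dn_nsp_out and econet hunks that follow, all retire the same open-coded idiom — chasing skb->next and comparing against the queue head — in favour of skb_queue_walk() and skb_queue_walk_safe() from linux/skbuff.h. A short usage sketch under the usual assumption that the caller holds whatever lock protects the queue; sum_queue() and flush_queue() are made-up names:

#include <linux/skbuff.h>

/* read-only walk: the queue must not change while we iterate */
static unsigned int sum_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned int len = 0;

	skb_queue_walk(q, skb)
		len += skb->len;

	return len;
}

/* _safe walk: the loop body may unlink the current entry */
static void flush_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb, *n;

	skb_queue_walk_safe(q, skb, n) {
		skb_unlink(skb, q);
		kfree_skb(skb);
	}
}

(In real code skb_queue_purge() already does what flush_queue() shows; the point here is only the iterator shape.)
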
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 2013c25b7f5a..da04f459337e 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -382,7 +382,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 	struct dn_scp *scp = DN_SK(sk);
-	struct sk_buff *skb2, *list, *ack = NULL;
+	struct sk_buff *skb2, *n, *ack = NULL;
 	int wakeup = 0;
 	int try_retrans = 0;
 	unsigned long reftime = cb->stamp;
@@ -390,9 +390,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 	unsigned short xmit_count;
 	unsigned short segnum;
 
-	skb2 = q->next;
-	list = (struct sk_buff *)q;
-	while(list != skb2) {
+	skb_queue_walk_safe(q, skb2, n) {
 		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);
 
 		if (dn_before_or_equal(cb2->segnum, acknum))
@@ -400,8 +398,6 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 
 		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */
 
-		skb2 = skb2->next;
-
 		if (ack == NULL)
 			continue;
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 6f479fa522c3..8121bf0029e3 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -901,15 +901,10 @@ static void aun_tx_ack(unsigned long seq, int result)
 	struct ec_cb *eb;
 
 	spin_lock_irqsave(&aun_queue_lock, flags);
-	skb = skb_peek(&aun_queue);
-	while (skb && skb != (struct sk_buff *)&aun_queue)
-	{
-		struct sk_buff *newskb = skb->next;
+	skb_queue_walk(&aun_queue, skb) {
 		eb = (struct ec_cb *)&skb->cb;
 		if (eb->seq == seq)
 			goto foundit;
-
-		skb = newskb;
 	}
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 	printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
@@ -982,23 +977,18 @@ static void aun_data_available(struct sock *sk, int slen)
 
 static void ab_cleanup(unsigned long h)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb, *n;
 	unsigned long flags;
 
 	spin_lock_irqsave(&aun_queue_lock, flags);
-	skb = skb_peek(&aun_queue);
-	while (skb && skb != (struct sk_buff *)&aun_queue)
-	{
-		struct sk_buff *newskb = skb->next;
+	skb_queue_walk_safe(&aun_queue, skb, n) {
 		struct ec_cb *eb = (struct ec_cb *)&skb->cb;
-		if ((jiffies - eb->start) > eb->timeout)
-		{
+		if ((jiffies - eb->start) > eb->timeout) {
 			tx_result(skb->sk, eb->cookie,
 				  ECTYPE_TRANSMIT_NOT_PRESENT);
 			skb_unlink(skb, &aun_queue);
 			kfree_skb(skb);
 		}
-		skb = newskb;
 	}
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 280352aba403..5a883affecd3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -337,11 +337,6 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
337void ether_setup(struct net_device *dev) 337void ether_setup(struct net_device *dev)
338{ 338{
339 dev->header_ops = &eth_header_ops; 339 dev->header_ops = &eth_header_ops;
340#ifdef CONFIG_COMPAT_NET_DEV_OPS
341 dev->change_mtu = eth_change_mtu;
342 dev->set_mac_address = eth_mac_addr;
343 dev->validate_addr = eth_validate_addr;
344#endif
345 dev->type = ARPHRD_ETHER; 340 dev->type = ARPHRD_ETHER;
346 dev->hard_header_len = ETH_HLEN; 341 dev->hard_header_len = ETH_HLEN;
347 dev->mtu = ETH_DATA_LEN; 342 dev->mtu = ETH_DATA_LEN;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9d26a3da37e5..5b919f7b45db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
408 408
409config INET_LRO 409config INET_LRO
410 bool "Large Receive Offload (ipv4/tcp)" 410 bool "Large Receive Offload (ipv4/tcp)"
411 411 default y
412 ---help--- 412 ---help---
413 Support for Large Receive Offload (ipv4/tcp). 413 Support for Large Receive Offload (ipv4/tcp).
414 414
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 170689681aa2..5abee4c97449 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1246,13 +1246,20 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1246 struct sk_buff **pp = NULL; 1246 struct sk_buff **pp = NULL;
1247 struct sk_buff *p; 1247 struct sk_buff *p;
1248 struct iphdr *iph; 1248 struct iphdr *iph;
1249 unsigned int hlen;
1250 unsigned int off;
1251 unsigned int id;
1249 int flush = 1; 1252 int flush = 1;
1250 int proto; 1253 int proto;
1251 int id;
1252 1254
1253 iph = skb_gro_header(skb, sizeof(*iph)); 1255 off = skb_gro_offset(skb);
1254 if (unlikely(!iph)) 1256 hlen = off + sizeof(*iph);
1255 goto out; 1257 iph = skb_gro_header_fast(skb, off);
1258 if (skb_gro_header_hard(skb, hlen)) {
1259 iph = skb_gro_header_slow(skb, hlen, off);
1260 if (unlikely(!iph))
1261 goto out;
1262 }
1256 1263
1257 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1264 proto = iph->protocol & (MAX_INET_PROTOS - 1);
1258 1265
@@ -1267,9 +1274,9 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1267 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1274 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1268 goto out_unlock; 1275 goto out_unlock;
1269 1276
1270 flush = ntohs(iph->tot_len) != skb_gro_len(skb) || 1277 id = ntohl(*(u32 *)&iph->id);
1271 iph->frag_off != htons(IP_DF); 1278 flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
1272 id = ntohs(iph->id); 1279 id >>= 16;
1273 1280
1274 for (p = *head; p; p = p->next) { 1281 for (p = *head; p; p = p->next) {
1275 struct iphdr *iph2; 1282 struct iphdr *iph2;
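
The inet_gro_receive hunk above, and the tcp_gro_receive and ipv6_gro_receive hunks further down, all open-code the same fast-path/slow-path header lookup. Purely as an illustration (a hypothetical helper, not something this patch adds), the sequence can be read as:

#include <linux/netdevice.h>

/* Hypothetical wrapper over the GRO header accessors used above: try
 * the frag0 fast path first, and fall back to the pulling slow path
 * only when fewer than 'hdrlen' bytes are directly available.  Returns
 * NULL if even the slow path cannot provide the header. */
static void *gro_header(struct sk_buff *skb, unsigned int hdrlen)
{
        unsigned int off = skb_gro_offset(skb);
        unsigned int hlen = off + hdrlen;
        void *hdr = skb_gro_header_fast(skb, off);

        if (skb_gro_header_hard(skb, hlen))
                hdr = skb_gro_header_slow(skb, hlen, off);

        return hdr;
}
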
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9070d11058e5..538d2a9a5115 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
986static struct node *trie_rebalance(struct trie *t, struct tnode *tn) 986static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
987{ 987{
988 int wasfull; 988 int wasfull;
989 t_key cindex, key = tn->key; 989 t_key cindex, key;
990 struct tnode *tp; 990 struct tnode *tp;
991 991
992 preempt_disable();
993 key = tn->key;
994
992 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) { 995 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
993 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 996 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
994 wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); 997 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
1007 if (IS_TNODE(tn)) 1010 if (IS_TNODE(tn))
1008 tn = (struct tnode *)resize(t, (struct tnode *)tn); 1011 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1009 1012
1013 preempt_enable();
1010 return (struct node *)tn; 1014 return (struct node *)tn;
1011} 1015}
1012 1016
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c4c60e9f068a..28205e5bfa9b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -784,8 +784,8 @@ static void rt_check_expire(void)
784{ 784{
785 static unsigned int rover; 785 static unsigned int rover;
786 unsigned int i = rover, goal; 786 unsigned int i = rover, goal;
787 struct rtable *rth, **rthp; 787 struct rtable *rth, *aux, **rthp;
788 unsigned long length = 0, samples = 0; 788 unsigned long samples = 0;
789 unsigned long sum = 0, sum2 = 0; 789 unsigned long sum = 0, sum2 = 0;
790 u64 mult; 790 u64 mult;
791 791
@@ -795,9 +795,9 @@ static void rt_check_expire(void)
795 goal = (unsigned int)mult; 795 goal = (unsigned int)mult;
796 if (goal > rt_hash_mask) 796 if (goal > rt_hash_mask)
797 goal = rt_hash_mask + 1; 797 goal = rt_hash_mask + 1;
798 length = 0;
799 for (; goal > 0; goal--) { 798 for (; goal > 0; goal--) {
800 unsigned long tmo = ip_rt_gc_timeout; 799 unsigned long tmo = ip_rt_gc_timeout;
800 unsigned long length;
801 801
802 i = (i + 1) & rt_hash_mask; 802 i = (i + 1) & rt_hash_mask;
803 rthp = &rt_hash_table[i].chain; 803 rthp = &rt_hash_table[i].chain;
@@ -809,8 +809,10 @@ static void rt_check_expire(void)
809 809
810 if (*rthp == NULL) 810 if (*rthp == NULL)
811 continue; 811 continue;
812 length = 0;
812 spin_lock_bh(rt_hash_lock_addr(i)); 813 spin_lock_bh(rt_hash_lock_addr(i));
813 while ((rth = *rthp) != NULL) { 814 while ((rth = *rthp) != NULL) {
815 prefetch(rth->u.dst.rt_next);
814 if (rt_is_expired(rth)) { 816 if (rt_is_expired(rth)) {
815 *rthp = rth->u.dst.rt_next; 817 *rthp = rth->u.dst.rt_next;
816 rt_free(rth); 818 rt_free(rth);
@@ -819,33 +821,30 @@ static void rt_check_expire(void)
819 if (rth->u.dst.expires) { 821 if (rth->u.dst.expires) {
820 /* Entry is expired even if it is in use */ 822 /* Entry is expired even if it is in use */
821 if (time_before_eq(jiffies, rth->u.dst.expires)) { 823 if (time_before_eq(jiffies, rth->u.dst.expires)) {
824nofree:
822 tmo >>= 1; 825 tmo >>= 1;
823 rthp = &rth->u.dst.rt_next; 826 rthp = &rth->u.dst.rt_next;
824 /* 827 /*
825 * Only bump our length if the hash 828 * We only count entries on
826 * inputs on entries n and n+1 are not
827 * the same, we only count entries on
828 * a chain with equal hash inputs once 829 * a chain with equal hash inputs once
829 * so that entries for different QOS 830 * so that entries for different QOS
830 * levels, and other non-hash input 831 * levels, and other non-hash input
831 * attributes don't unfairly skew 832 * attributes don't unfairly skew
832 * the length computation 833 * the length computation
833 */ 834 */
834 if ((*rthp == NULL) || 835 for (aux = rt_hash_table[i].chain;;) {
835 !compare_hash_inputs(&(*rthp)->fl, 836 if (aux == rth) {
836 &rth->fl)) 837 length += ONE;
837 length += ONE; 838 break;
839 }
840 if (compare_hash_inputs(&aux->fl, &rth->fl))
841 break;
842 aux = aux->u.dst.rt_next;
843 }
838 continue; 844 continue;
839 } 845 }
840 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { 846 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
841 tmo >>= 1; 847 goto nofree;
842 rthp = &rth->u.dst.rt_next;
843 if ((*rthp == NULL) ||
844 !compare_hash_inputs(&(*rthp)->fl,
845 &rth->fl))
846 length += ONE;
847 continue;
848 }
849 848
850 /* Cleanup aged off entries. */ 849 /* Cleanup aged off entries. */
851 *rthp = rth->u.dst.rt_next; 850 *rthp = rth->u.dst.rt_next;
@@ -1068,7 +1067,6 @@ out: return 0;
1068static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) 1067static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
1069{ 1068{
1070 struct rtable *rth, **rthp; 1069 struct rtable *rth, **rthp;
1071 struct rtable *rthi;
1072 unsigned long now; 1070 unsigned long now;
1073 struct rtable *cand, **candp; 1071 struct rtable *cand, **candp;
1074 u32 min_score; 1072 u32 min_score;
@@ -1088,7 +1086,6 @@ restart:
1088 } 1086 }
1089 1087
1090 rthp = &rt_hash_table[hash].chain; 1088 rthp = &rt_hash_table[hash].chain;
1091 rthi = NULL;
1092 1089
1093 spin_lock_bh(rt_hash_lock_addr(hash)); 1090 spin_lock_bh(rt_hash_lock_addr(hash));
1094 while ((rth = *rthp) != NULL) { 1091 while ((rth = *rthp) != NULL) {
@@ -1134,17 +1131,6 @@ restart:
1134 chain_length++; 1131 chain_length++;
1135 1132
1136 rthp = &rth->u.dst.rt_next; 1133 rthp = &rth->u.dst.rt_next;
1137
1138 /*
1139 * check to see if the next entry in the chain
1140 * contains the same hash input values as rt. If it does
1141 * This is where we will insert into the list, instead of
1142 * at the head. This groups entries that differ by aspects not
 1143 * relevant to the hash function together, which we use to adjust
1144 * our chain length
1145 */
1146 if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
1147 rthi = rth;
1148 } 1134 }
1149 1135
1150 if (cand) { 1136 if (cand) {
@@ -1205,10 +1191,7 @@ restart:
1205 } 1191 }
1206 } 1192 }
1207 1193
1208 if (rthi) 1194 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1209 rt->u.dst.rt_next = rthi->u.dst.rt_next;
1210 else
1211 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1212 1195
1213#if RT_CACHE_DEBUG >= 2 1196#if RT_CACHE_DEBUG >= 2
1214 if (rt->u.dst.rt_next) { 1197 if (rt->u.dst.rt_next) {
@@ -1224,10 +1207,7 @@ restart:
 1224 * previous writes to rt are committed to memory 1207 * previous writes to rt are committed to memory
1225 * before making rt visible to other CPUS. 1208 * before making rt visible to other CPUS.
1226 */ 1209 */
1227 if (rthi) 1210 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1228 rcu_assign_pointer(rthi->u.dst.rt_next, rt);
1229 else
1230 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1231 1211
1232 spin_unlock_bh(rt_hash_lock_addr(hash)); 1212 spin_unlock_bh(rt_hash_lock_addr(hash));
1233 *rp = rt; 1213 *rp = rt;
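
In the rt_check_expire hunk above, the removed next-entry comparison is replaced by a walk from the chain head ('aux'), so an entry is counted only when no earlier entry shares its hash inputs. A distilled version of that counting rule (illustrative only; struct entry and same_inputs() stand in for struct rtable and compare_hash_inputs()):

struct entry {
        struct entry *next;
        /* flow keys etc. elided */
};

/* Count each group of same-hash-input entries once: entry 'e'
 * contributes only if no earlier chain element compares equal to it. */
static unsigned long chain_length(struct entry *chain,
                                  int (*same_inputs)(const struct entry *,
                                                     const struct entry *))
{
        const struct entry *e, *aux;
        unsigned long length = 0;

        for (e = chain; e; e = e->next) {
                for (aux = chain; ; aux = aux->next) {
                        if (aux == e) {
                                length++;
                                break;
                        }
                        if (same_inputs(aux, e))
                                break;
                }
        }
        return length;
}
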
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7a0f0b27bf1f..17b89c523f9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
439 !tp->urg_data || 439 !tp->urg_data ||
440 before(tp->urg_seq, tp->copied_seq) || 440 before(tp->urg_seq, tp->copied_seq) ||
441 !before(tp->urg_seq, tp->rcv_nxt)) { 441 !before(tp->urg_seq, tp->rcv_nxt)) {
442 struct sk_buff *skb;
443
442 answ = tp->rcv_nxt - tp->copied_seq; 444 answ = tp->rcv_nxt - tp->copied_seq;
443 445
444 /* Subtract 1, if FIN is in queue. */ 446 /* Subtract 1, if FIN is in queue. */
445 if (answ && !skb_queue_empty(&sk->sk_receive_queue)) 447 skb = skb_peek_tail(&sk->sk_receive_queue);
446 answ -= 448 if (answ && skb)
447 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; 449 answ -= tcp_hdr(skb)->fin;
448 } else 450 } else
449 answ = tp->urg_seq - tp->copied_seq; 451 answ = tp->urg_seq - tp->copied_seq;
450 release_sock(sk); 452 release_sock(sk);
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1382 1384
1383 /* Next get a buffer. */ 1385 /* Next get a buffer. */
1384 1386
1385 skb = skb_peek(&sk->sk_receive_queue); 1387 skb_queue_walk(&sk->sk_receive_queue, skb) {
1386 do {
1387 if (!skb)
1388 break;
1389
1390 /* Now that we have two receive queues this 1388 /* Now that we have two receive queues this
1391 * shouldn't happen. 1389 * shouldn't happen.
1392 */ 1390 */
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1403 if (tcp_hdr(skb)->fin) 1401 if (tcp_hdr(skb)->fin)
1404 goto found_fin_ok; 1402 goto found_fin_ok;
1405 WARN_ON(!(flags & MSG_PEEK)); 1403 WARN_ON(!(flags & MSG_PEEK));
1406 skb = skb->next; 1404 }
1407 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1408 1405
1409 /* Well, if we have backlog, try to process it now yet. */ 1406 /* Well, if we have backlog, try to process it now yet. */
1410 1407
@@ -2518,20 +2515,30 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2518 unsigned int thlen; 2515 unsigned int thlen;
2519 unsigned int flags; 2516 unsigned int flags;
2520 unsigned int mss = 1; 2517 unsigned int mss = 1;
2518 unsigned int hlen;
2519 unsigned int off;
2521 int flush = 1; 2520 int flush = 1;
2522 int i; 2521 int i;
2523 2522
2524 th = skb_gro_header(skb, sizeof(*th)); 2523 off = skb_gro_offset(skb);
2525 if (unlikely(!th)) 2524 hlen = off + sizeof(*th);
2526 goto out; 2525 th = skb_gro_header_fast(skb, off);
2526 if (skb_gro_header_hard(skb, hlen)) {
2527 th = skb_gro_header_slow(skb, hlen, off);
2528 if (unlikely(!th))
2529 goto out;
2530 }
2527 2531
2528 thlen = th->doff * 4; 2532 thlen = th->doff * 4;
2529 if (thlen < sizeof(*th)) 2533 if (thlen < sizeof(*th))
2530 goto out; 2534 goto out;
2531 2535
2532 th = skb_gro_header(skb, thlen); 2536 hlen = off + thlen;
2533 if (unlikely(!th)) 2537 if (skb_gro_header_hard(skb, hlen)) {
2534 goto out; 2538 th = skb_gro_header_slow(skb, hlen, off);
2539 if (unlikely(!th))
2540 goto out;
2541 }
2535 2542
2536 skb_gro_pull(skb, thlen); 2543 skb_gro_pull(skb, thlen);
2537 2544
@@ -2544,7 +2551,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2544 2551
2545 th2 = tcp_hdr(p); 2552 th2 = tcp_hdr(p);
2546 2553
2547 if ((th->source ^ th2->source) | (th->dest ^ th2->dest)) { 2554 if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2548 NAPI_GRO_CB(p)->same_flow = 0; 2555 NAPI_GRO_CB(p)->same_flow = 0;
2549 continue; 2556 continue;
2550 } 2557 }
@@ -2559,14 +2566,14 @@ found:
2559 flush |= flags & TCP_FLAG_CWR; 2566 flush |= flags & TCP_FLAG_CWR;
2560 flush |= (flags ^ tcp_flag_word(th2)) & 2567 flush |= (flags ^ tcp_flag_word(th2)) &
2561 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2568 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
2562 flush |= (th->ack_seq ^ th2->ack_seq) | (th->window ^ th2->window); 2569 flush |= th->ack_seq ^ th2->ack_seq;
2563 for (i = sizeof(*th); !flush && i < thlen; i += 4) 2570 for (i = sizeof(*th); i < thlen; i += 4)
2564 flush |= *(u32 *)((u8 *)th + i) ^ 2571 flush |= *(u32 *)((u8 *)th + i) ^
2565 *(u32 *)((u8 *)th2 + i); 2572 *(u32 *)((u8 *)th2 + i);
2566 2573
2567 mss = skb_shinfo(p)->gso_size; 2574 mss = skb_shinfo(p)->gso_size;
2568 2575
2569 flush |= (len > mss) | !len; 2576 flush |= (len - 1) >= mss;
2570 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); 2577 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2571 2578
2572 if (flush || skb_gro_receive(head, skb)) { 2579 if (flush || skb_gro_receive(head, skb)) {
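
One line in the tcp_gro_receive hunk above deserves a note: because len and mss are unsigned, the new test (len - 1) >= mss is equivalent to the removed (len > mss) | !len, since len == 0 wraps around to the largest unsigned value. A tiny self-checking example of that identity (ordinary user-space C, not kernel code):

#include <assert.h>

static int flush_old(unsigned int len, unsigned int mss)
{
        return (len > mss) | !len;
}

static int flush_new(unsigned int len, unsigned int mss)
{
        return (len - 1) >= mss;        /* len == 0 wraps to UINT_MAX */
}

int main(void)
{
        unsigned int len, mss;

        for (mss = 1; mss <= 1500; mss++)
                for (len = 0; len <= 3 * mss; len++)
                        assert(flush_old(len, mss) == flush_new(len, mss));
        return 0;
}
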
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eeb8a92aa416..ba34a23c1bfb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4426,7 +4426,7 @@ drop:
4426 } 4426 }
4427 __skb_queue_head(&tp->out_of_order_queue, skb); 4427 __skb_queue_head(&tp->out_of_order_queue, skb);
4428 } else { 4428 } else {
4429 struct sk_buff *skb1 = tp->out_of_order_queue.prev; 4429 struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4430 u32 seq = TCP_SKB_CB(skb)->seq; 4430 u32 seq = TCP_SKB_CB(skb)->seq;
4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4431 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4432 4432
@@ -4443,15 +4443,18 @@ drop:
4443 } 4443 }
4444 4444
4445 /* Find place to insert this segment. */ 4445 /* Find place to insert this segment. */
4446 do { 4446 while (1) {
4447 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4447 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4448 break; 4448 break;
4449 } while ((skb1 = skb1->prev) != 4449 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4450 (struct sk_buff *)&tp->out_of_order_queue); 4450 skb1 = NULL;
4451 break;
4452 }
4453 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4454 }
4451 4455
4452 /* Do skb overlap to previous one? */ 4456 /* Do skb overlap to previous one? */
4453 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && 4457 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4454 before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4455 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4458 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4456 /* All the bits are present. Drop. */ 4459 /* All the bits are present. Drop. */
4457 __kfree_skb(skb); 4460 __kfree_skb(skb);
@@ -4463,24 +4466,41 @@ drop:
4463 tcp_dsack_set(sk, seq, 4466 tcp_dsack_set(sk, seq,
4464 TCP_SKB_CB(skb1)->end_seq); 4467 TCP_SKB_CB(skb1)->end_seq);
4465 } else { 4468 } else {
4466 skb1 = skb1->prev; 4469 if (skb_queue_is_first(&tp->out_of_order_queue,
4470 skb1))
4471 skb1 = NULL;
4472 else
4473 skb1 = skb_queue_prev(
4474 &tp->out_of_order_queue,
4475 skb1);
4467 } 4476 }
4468 } 4477 }
4469 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4478 if (!skb1)
4479 __skb_queue_head(&tp->out_of_order_queue, skb);
4480 else
4481 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4470 4482
4471 /* And clean segments covered by new one as whole. */ 4483 /* And clean segments covered by new one as whole. */
4472 while ((skb1 = skb->next) != 4484 if (skb1 && !skb_queue_is_last(&tp->out_of_order_queue, skb1)) {
4473 (struct sk_buff *)&tp->out_of_order_queue && 4485 struct sk_buff *n;
4474 after(end_seq, TCP_SKB_CB(skb1)->seq)) { 4486
4475 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4487 skb1 = skb_queue_next(&tp->out_of_order_queue, skb1);
4488 skb_queue_walk_from_safe(&tp->out_of_order_queue,
4489 skb1, n) {
4490 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4491 break;
4492 if (before(end_seq,
4493 TCP_SKB_CB(skb1)->end_seq)) {
4494 tcp_dsack_extend(sk,
4495 TCP_SKB_CB(skb1)->seq,
4496 end_seq);
4497 break;
4498 }
4499 __skb_unlink(skb1, &tp->out_of_order_queue);
4476 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4500 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4477 end_seq); 4501 TCP_SKB_CB(skb1)->end_seq);
4478 break; 4502 __kfree_skb(skb1);
4479 } 4503 }
4480 __skb_unlink(skb1, &tp->out_of_order_queue);
4481 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4482 TCP_SKB_CB(skb1)->end_seq);
4483 __kfree_skb(skb1);
4484 } 4504 }
4485 4505
4486add_sack: 4506add_sack:
@@ -4492,7 +4512,10 @@ add_sack:
4492static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4512static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4493 struct sk_buff_head *list) 4513 struct sk_buff_head *list)
4494{ 4514{
4495 struct sk_buff *next = skb->next; 4515 struct sk_buff *next = NULL;
4516
4517 if (!skb_queue_is_last(list, skb))
4518 next = skb_queue_next(list, skb);
4496 4519
4497 __skb_unlink(skb, list); 4520 __skb_unlink(skb, list);
4498 __kfree_skb(skb); 4521 __kfree_skb(skb);
@@ -4503,6 +4526,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4503 4526
4504/* Collapse contiguous sequence of skbs head..tail with 4527/* Collapse contiguous sequence of skbs head..tail with
4505 * sequence numbers start..end. 4528 * sequence numbers start..end.
4529 *
4530 * If tail is NULL, this means until the end of the list.
4531 *
4506 * Segments with FIN/SYN are not collapsed (only because this 4532 * Segments with FIN/SYN are not collapsed (only because this
4507 * simplifies code) 4533 * simplifies code)
4508 */ 4534 */
@@ -4511,15 +4537,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4511 struct sk_buff *head, struct sk_buff *tail, 4537 struct sk_buff *head, struct sk_buff *tail,
4512 u32 start, u32 end) 4538 u32 start, u32 end)
4513{ 4539{
4514 struct sk_buff *skb; 4540 struct sk_buff *skb, *n;
4541 bool end_of_skbs;
4515 4542
4516 /* First, check that queue is collapsible and find 4543 /* First, check that queue is collapsible and find
4517 * the point where collapsing can be useful. */ 4544 * the point where collapsing can be useful. */
4518 for (skb = head; skb != tail;) { 4545 skb = head;
4546restart:
4547 end_of_skbs = true;
4548 skb_queue_walk_from_safe(list, skb, n) {
4549 if (skb == tail)
4550 break;
4519 /* No new bits? It is possible on ofo queue. */ 4551 /* No new bits? It is possible on ofo queue. */
4520 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4552 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4521 skb = tcp_collapse_one(sk, skb, list); 4553 skb = tcp_collapse_one(sk, skb, list);
4522 continue; 4554 if (!skb)
4555 break;
4556 goto restart;
4523 } 4557 }
4524 4558
4525 /* The first skb to collapse is: 4559 /* The first skb to collapse is:
@@ -4529,16 +4563,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4529 */ 4563 */
4530 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4564 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
4531 (tcp_win_from_space(skb->truesize) > skb->len || 4565 (tcp_win_from_space(skb->truesize) > skb->len ||
4532 before(TCP_SKB_CB(skb)->seq, start) || 4566 before(TCP_SKB_CB(skb)->seq, start))) {
4533 (skb->next != tail && 4567 end_of_skbs = false;
4534 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
4535 break; 4568 break;
4569 }
4570
4571 if (!skb_queue_is_last(list, skb)) {
4572 struct sk_buff *next = skb_queue_next(list, skb);
4573 if (next != tail &&
4574 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
4575 end_of_skbs = false;
4576 break;
4577 }
4578 }
4536 4579
4537 /* Decided to skip this, advance start seq. */ 4580 /* Decided to skip this, advance start seq. */
4538 start = TCP_SKB_CB(skb)->end_seq; 4581 start = TCP_SKB_CB(skb)->end_seq;
4539 skb = skb->next;
4540 } 4582 }
4541 if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4583 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
4542 return; 4584 return;
4543 4585
4544 while (before(start, end)) { 4586 while (before(start, end)) {
@@ -4583,7 +4625,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4583 } 4625 }
4584 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4626 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4585 skb = tcp_collapse_one(sk, skb, list); 4627 skb = tcp_collapse_one(sk, skb, list);
4586 if (skb == tail || 4628 if (!skb ||
4629 skb == tail ||
4587 tcp_hdr(skb)->syn || 4630 tcp_hdr(skb)->syn ||
4588 tcp_hdr(skb)->fin) 4631 tcp_hdr(skb)->fin)
4589 return; 4632 return;
@@ -4610,17 +4653,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4610 head = skb; 4653 head = skb;
4611 4654
4612 for (;;) { 4655 for (;;) {
4613 skb = skb->next; 4656 struct sk_buff *next = NULL;
4657
4658 if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
4659 next = skb_queue_next(&tp->out_of_order_queue, skb);
4660 skb = next;
4614 4661
4615 /* Segment is terminated when we see gap or when 4662 /* Segment is terminated when we see gap or when
4616 * we are at the end of all the queue. */ 4663 * we are at the end of all the queue. */
4617 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 4664 if (!skb ||
4618 after(TCP_SKB_CB(skb)->seq, end) || 4665 after(TCP_SKB_CB(skb)->seq, end) ||
4619 before(TCP_SKB_CB(skb)->end_seq, start)) { 4666 before(TCP_SKB_CB(skb)->end_seq, start)) {
4620 tcp_collapse(sk, &tp->out_of_order_queue, 4667 tcp_collapse(sk, &tp->out_of_order_queue,
4621 head, skb, start, end); 4668 head, skb, start, end);
4622 head = skb; 4669 head = skb;
4623 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 4670 if (!skb)
4624 break; 4671 break;
4625 /* Start new segment */ 4672 /* Start new segment */
4626 start = TCP_SKB_CB(skb)->seq; 4673 start = TCP_SKB_CB(skb)->seq;
@@ -4681,10 +4728,11 @@ static int tcp_prune_queue(struct sock *sk)
4681 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4728 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4682 4729
4683 tcp_collapse_ofo_queue(sk); 4730 tcp_collapse_ofo_queue(sk);
4684 tcp_collapse(sk, &sk->sk_receive_queue, 4731 if (!skb_queue_empty(&sk->sk_receive_queue))
4685 sk->sk_receive_queue.next, 4732 tcp_collapse(sk, &sk->sk_receive_queue,
4686 (struct sk_buff *)&sk->sk_receive_queue, 4733 skb_peek(&sk->sk_receive_queue),
4687 tp->copied_seq, tp->rcv_nxt); 4734 NULL,
4735 tp->copied_seq, tp->rcv_nxt);
4688 sk_mem_reclaim(sk); 4736 sk_mem_reclaim(sk);
4689 4737
4690 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4738 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
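
The out-of-order-queue hunks above stop walking through the sk_buff_head sentinel and instead use skb_queue_prev()/skb_queue_is_first(), treating "no predecessor" as NULL. The insertion-point search reduces to the following sketch (illustrative; seq_of() is a placeholder for reading TCP_SKB_CB(skb)->seq):

#include <net/tcp.h>

/* Find the last queued skb whose sequence number is <= seq, scanning
 * from the tail; NULL means the new skb belongs at the queue head. */
static struct sk_buff *ofo_insert_point(struct sk_buff_head *q, u32 seq,
                                        u32 (*seq_of)(const struct sk_buff *))
{
        struct sk_buff *skb = skb_peek_tail(q);

        while (skb) {
                if (!after(seq_of(skb), seq))
                        return skb;
                if (skb_queue_is_first(q, skb))
                        return NULL;
                skb = skb_queue_prev(q, skb);
        }
        return NULL;            /* empty queue */
}
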
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 61f55386a236..b6215be0963f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -817,13 +817,20 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
817 struct sk_buff *p; 817 struct sk_buff *p;
818 struct ipv6hdr *iph; 818 struct ipv6hdr *iph;
819 unsigned int nlen; 819 unsigned int nlen;
820 unsigned int hlen;
821 unsigned int off;
820 int flush = 1; 822 int flush = 1;
821 int proto; 823 int proto;
822 __wsum csum; 824 __wsum csum;
823 825
824 iph = skb_gro_header(skb, sizeof(*iph)); 826 off = skb_gro_offset(skb);
825 if (unlikely(!iph)) 827 hlen = off + sizeof(*iph);
826 goto out; 828 iph = skb_gro_header_fast(skb, off);
829 if (skb_gro_header_hard(skb, hlen)) {
830 iph = skb_gro_header_slow(skb, hlen, off);
831 if (unlikely(!iph))
832 goto out;
833 }
827 834
828 skb_gro_pull(skb, sizeof(*iph)); 835 skb_gro_pull(skb, sizeof(*iph));
829 skb_set_transport_header(skb, skb_gro_offset(skb)); 836 skb_set_transport_header(skb, skb_gro_offset(skb));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1394ddb6e35c..032a5ec391c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = {
137 } 137 }
138 }, 138 },
139 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 139 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
140 .rt6i_protocol = RTPROT_KERNEL,
140 .rt6i_metric = ~(u32) 0, 141 .rt6i_metric = ~(u32) 0,
141 .rt6i_ref = ATOMIC_INIT(1), 142 .rt6i_ref = ATOMIC_INIT(1),
142}; 143};
@@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
159 } 160 }
160 }, 161 },
161 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 162 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
163 .rt6i_protocol = RTPROT_KERNEL,
162 .rt6i_metric = ~(u32) 0, 164 .rt6i_metric = ~(u32) 0,
163 .rt6i_ref = ATOMIC_INIT(1), 165 .rt6i_ref = ATOMIC_INIT(1),
164}; 166};
@@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
176 } 178 }
177 }, 179 },
178 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 180 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
181 .rt6i_protocol = RTPROT_KERNEL,
179 .rt6i_metric = ~(u32) 0, 182 .rt6i_metric = ~(u32) 0,
180 .rt6i_ref = ATOMIC_INIT(1), 183 .rt6i_ref = ATOMIC_INIT(1),
181}; 184};
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d9dd94b6bf66..ea37741062a9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -941,7 +941,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
941 return 0; 941 return 0;
942} 942}
943 943
944struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) 944static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
945 struct sk_buff *skb)
945{ 946{
946 struct ipv6hdr *iph = skb_gro_network_header(skb); 947 struct ipv6hdr *iph = skb_gro_network_header(skb);
947 948
@@ -961,9 +962,8 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
961 962
962 return tcp_gro_receive(head, skb); 963 return tcp_gro_receive(head, skb);
963} 964}
964EXPORT_SYMBOL(tcp6_gro_receive);
965 965
966int tcp6_gro_complete(struct sk_buff *skb) 966static int tcp6_gro_complete(struct sk_buff *skb)
967{ 967{
968 struct ipv6hdr *iph = ipv6_hdr(skb); 968 struct ipv6hdr *iph = ipv6_hdr(skb);
969 struct tcphdr *th = tcp_hdr(skb); 969 struct tcphdr *th = tcp_hdr(skb);
@@ -974,7 +974,6 @@ int tcp6_gro_complete(struct sk_buff *skb)
974 974
975 return tcp_gro_complete(skb); 975 return tcp_gro_complete(skb);
976} 976}
977EXPORT_SYMBOL(tcp6_gro_complete);
978 977
979static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 978static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
980 u32 ts, struct tcp_md5sig_key *key, int rst) 979 u32 ts, struct tcp_md5sig_key *key, int rst)
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 2562ebc1b22c..7af2e74deda8 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -982,17 +982,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
982{ 982{
983 struct sk_buff *tx_skb; 983 struct sk_buff *tx_skb;
984 struct sk_buff *skb; 984 struct sk_buff *skb;
985 int count;
986 985
987 IRDA_ASSERT(self != NULL, return;); 986 IRDA_ASSERT(self != NULL, return;);
988 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 987 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
989 988
990 /* Initialize variables */
991 count = skb_queue_len(&self->wx_list);
992
993 /* Resend unacknowledged frame(s) */ 989 /* Resend unacknowledged frame(s) */
994 skb = skb_peek(&self->wx_list); 990 skb_queue_walk(&self->wx_list, skb) {
995 while (skb != NULL) {
996 irlap_wait_min_turn_around(self, &self->qos_tx); 991 irlap_wait_min_turn_around(self, &self->qos_tx);
997 992
998 /* We copy the skb to be retransmitted since we will have to 993 /* We copy the skb to be retransmitted since we will have to
@@ -1011,21 +1006,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
1011 /* 1006 /*
1012 * Set poll bit on the last frame retransmitted 1007 * Set poll bit on the last frame retransmitted
1013 */ 1008 */
1014 if (count-- == 1) 1009 if (skb_queue_is_last(&self->wx_list, skb))
1015 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ 1010 tx_skb->data[1] |= PF_BIT; /* Set p/f bit */
1016 else 1011 else
1017 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ 1012 tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */
1018 1013
1019 irlap_send_i_frame(self, tx_skb, command); 1014 irlap_send_i_frame(self, tx_skb, command);
1020
1021 /*
1022 * If our skb is the last buffer in the list, then
1023 * we are finished, if not, move to the next sk-buffer
1024 */
1025 if (skb == skb_peek_tail(&self->wx_list))
1026 skb = NULL;
1027 else
1028 skb = skb->next;
1029 } 1015 }
1030#if 0 /* Not yet */ 1016#if 0 /* Not yet */
1031 /* 1017 /*
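
The irlap_resend_rejected_frames() hunk above drops the manual count/peek_tail bookkeeping: skb_queue_walk() visits every frame and skb_queue_is_last() identifies the one that should carry the poll/final bit. The shape of that loop, reduced to a sketch (emit() is a placeholder for the per-frame transmit step):

#include <linux/skbuff.h>
#include <linux/types.h>

/* Walk a queue without touching the sk_buff_head sentinel and tell the
 * callback whether it is handling the final element. */
static void for_each_frame(struct sk_buff_head *q,
                           void (*emit)(struct sk_buff *, bool last))
{
        struct sk_buff *skb;

        skb_queue_walk(q, skb)
                emit(skb, skb_queue_is_last(q, skb));
}
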
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3477624a4906..c6bab39b018e 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -79,10 +79,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
79 79
80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { 80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
81 /* indicate or confirm not required */ 81 /* indicate or confirm not required */
82 /* XXX this is not very pretty, perhaps we should store
83 * XXX indicate/confirm-needed state in the llc_conn_state_ev
84 * XXX control block of the SKB instead? -DaveM
85 */
86 if (!skb->next) 82 if (!skb->next)
87 goto out_kfree_skb; 83 goto out_kfree_skb;
88 goto out_skb_put; 84 goto out_skb_put;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e7682fe1c590..11c72311f35b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -52,14 +52,6 @@ static const struct file_operations name## _ops = { \
52 52
53DEBUGFS_READONLY_FILE(frequency, 20, "%d", 53DEBUGFS_READONLY_FILE(frequency, 20, "%d",
54 local->hw.conf.channel->center_freq); 54 local->hw.conf.channel->center_freq);
55DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
56 local->hw.wiphy->rts_threshold);
57DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
58 local->hw.wiphy->frag_threshold);
59DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
60 local->hw.wiphy->retry_short);
61DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
62 local->hw.wiphy->retry_long);
63DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 55DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
64 local->total_ps_buffered); 56 local->total_ps_buffered);
65DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", 57DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
@@ -303,10 +295,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
303 local->debugfs.keys = debugfs_create_dir("keys", phyd); 295 local->debugfs.keys = debugfs_create_dir("keys", phyd);
304 296
305 DEBUGFS_ADD(frequency); 297 DEBUGFS_ADD(frequency);
306 DEBUGFS_ADD(rts_threshold);
307 DEBUGFS_ADD(fragmentation_threshold);
308 DEBUGFS_ADD(short_retry_limit);
309 DEBUGFS_ADD(long_retry_limit);
310 DEBUGFS_ADD(total_ps_buffered); 298 DEBUGFS_ADD(total_ps_buffered);
311 DEBUGFS_ADD(wep_iv); 299 DEBUGFS_ADD(wep_iv);
312 DEBUGFS_ADD(tsf); 300 DEBUGFS_ADD(tsf);
@@ -359,10 +347,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
359void debugfs_hw_del(struct ieee80211_local *local) 347void debugfs_hw_del(struct ieee80211_local *local)
360{ 348{
361 DEBUGFS_DEL(frequency); 349 DEBUGFS_DEL(frequency);
362 DEBUGFS_DEL(rts_threshold);
363 DEBUGFS_DEL(fragmentation_threshold);
364 DEBUGFS_DEL(short_retry_limit);
365 DEBUGFS_DEL(long_retry_limit);
366 DEBUGFS_DEL(total_ps_buffered); 350 DEBUGFS_DEL(total_ps_buffered);
367 DEBUGFS_DEL(wep_iv); 351 DEBUGFS_DEL(wep_iv);
368 DEBUGFS_DEL(tsf); 352 DEBUGFS_DEL(tsf);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c236079ed38a..0b30277eb366 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -535,9 +535,9 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
535 bssid = ifibss->bssid; 535 bssid = ifibss->bssid;
536 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid, 536 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid,
537 ifibss->ssid, ifibss->ssid_len, 537 ifibss->ssid, ifibss->ssid_len,
538 capability,
539 WLAN_CAPABILITY_IBSS | 538 WLAN_CAPABILITY_IBSS |
540 WLAN_CAPABILITY_PRIVACY); 539 WLAN_CAPABILITY_PRIVACY,
540 capability);
541 541
542#ifdef CONFIG_MAC80211_IBSS_DEBUG 542#ifdef CONFIG_MAC80211_IBSS_DEBUG
543 if (bss) 543 if (bss)
@@ -737,6 +737,9 @@ static void ieee80211_ibss_work(struct work_struct *work)
737 struct ieee80211_if_ibss *ifibss; 737 struct ieee80211_if_ibss *ifibss;
738 struct sk_buff *skb; 738 struct sk_buff *skb;
739 739
740 if (WARN_ON(local->suspended))
741 return;
742
740 if (!netif_running(sdata->dev)) 743 if (!netif_running(sdata->dev))
741 return; 744 return;
742 745
@@ -773,10 +776,36 @@ static void ieee80211_ibss_timer(unsigned long data)
773 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 776 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
774 struct ieee80211_local *local = sdata->local; 777 struct ieee80211_local *local = sdata->local;
775 778
779 if (local->quiescing) {
780 ifibss->timer_running = true;
781 return;
782 }
783
776 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); 784 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
777 queue_work(local->hw.workqueue, &ifibss->work); 785 queue_work(local->hw.workqueue, &ifibss->work);
778} 786}
779 787
788#ifdef CONFIG_PM
789void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
790{
791 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
792
793 cancel_work_sync(&ifibss->work);
794 if (del_timer_sync(&ifibss->timer))
795 ifibss->timer_running = true;
796}
797
798void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata)
799{
800 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
801
802 if (ifibss->timer_running) {
803 add_timer(&ifibss->timer);
804 ifibss->timer_running = false;
805 }
806}
807#endif
808
780void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) 809void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
781{ 810{
782 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 811 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
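
The IBSS changes above introduce the quiesce/restart idiom this series applies across mac80211: on suspend, del_timer_sync() reports whether the timer was still pending, and only in that case is it re-armed on resume; a timer that fires while local->quiescing is set just records itself instead of queueing work. Boiled down to a sketch (the bool is assumed to live in the interface data, as ifibss->timer_running does here):

#include <linux/timer.h>
#include <linux/types.h>

/* Illustrative single-timer variant of the quiesce/restart pattern. */
static void one_timer_quiesce(struct timer_list *t, bool *was_running)
{
        if (del_timer_sync(t))
                *was_running = true;
}

static void one_timer_restart(struct timer_list *t, bool *was_running)
{
        if (*was_running) {
                *was_running = false;
                add_timer(t);
        }
}
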
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 9d1514727f6e..c088c46704a3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -293,6 +293,7 @@ struct ieee80211_if_managed {
293 int auth_tries; /* retries for auth req */ 293 int auth_tries; /* retries for auth req */
294 int assoc_tries; /* retries for assoc req */ 294 int assoc_tries; /* retries for assoc req */
295 295
296 unsigned long timers_running; /* used for quiesce/restart */
296 bool powersave; /* powersave requested for this iface */ 297 bool powersave; /* powersave requested for this iface */
297 298
298 unsigned long request; 299 unsigned long request;
@@ -333,6 +334,9 @@ struct ieee80211_if_ibss {
333 334
334 unsigned long request; 335 unsigned long request;
335 unsigned long last_scan_completed; 336 unsigned long last_scan_completed;
337
338 bool timer_running;
339
336 bool fixed_bssid; 340 bool fixed_bssid;
337 bool fixed_channel; 341 bool fixed_channel;
338 342
@@ -358,6 +362,8 @@ struct ieee80211_if_mesh {
358 struct timer_list mesh_path_timer; 362 struct timer_list mesh_path_timer;
359 struct sk_buff_head skb_queue; 363 struct sk_buff_head skb_queue;
360 364
365 unsigned long timers_running;
366
361 bool housekeeping; 367 bool housekeeping;
362 368
363 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 369 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
@@ -609,6 +615,21 @@ struct ieee80211_local {
609 unsigned int filter_flags; /* FIF_* */ 615 unsigned int filter_flags; /* FIF_* */
610 struct iw_statistics wstats; 616 struct iw_statistics wstats;
611 bool tim_in_locked_section; /* see ieee80211_beacon_get() */ 617 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
618
619 /*
620 * suspended is true if we finished all the suspend _and_ we have
621 * not yet come up from resume. This is to be used by mac80211
622 * to ensure driver sanity during suspend and mac80211's own
623 * sanity. It can eventually be used for WoW as well.
624 */
625 bool suspended;
626
627 /*
628 * quiescing is true during the suspend process _only_ to
629 * ease timer cancelling etc.
630 */
631 bool quiescing;
632
612 int tx_headroom; /* required headroom for hardware/radiotap */ 633 int tx_headroom; /* required headroom for hardware/radiotap */
613 634
614 /* Tasklet and skb queue to process calls from IRQ mode. All frames 635 /* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -758,10 +779,6 @@ struct ieee80211_local {
758 struct dentry *rcdir; 779 struct dentry *rcdir;
759 struct dentry *rcname; 780 struct dentry *rcname;
760 struct dentry *frequency; 781 struct dentry *frequency;
761 struct dentry *rts_threshold;
762 struct dentry *fragmentation_threshold;
763 struct dentry *short_retry_limit;
764 struct dentry *long_retry_limit;
765 struct dentry *total_ps_buffered; 782 struct dentry *total_ps_buffered;
766 struct dentry *wep_iv; 783 struct dentry *wep_iv;
767 struct dentry *tsf; 784 struct dentry *tsf;
@@ -938,6 +955,11 @@ void ieee80211_send_pspoll(struct ieee80211_local *local,
938void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); 955void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
939int ieee80211_max_network_latency(struct notifier_block *nb, 956int ieee80211_max_network_latency(struct notifier_block *nb,
940 unsigned long data, void *dummy); 957 unsigned long data, void *dummy);
958void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
959 struct ieee80211_channel_sw_ie *sw_elem,
960 struct ieee80211_bss *bss);
961void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
962void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
941 963
942/* IBSS code */ 964/* IBSS code */
943void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 965void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -950,6 +972,8 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
950int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 972int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
951 struct cfg80211_ibss_params *params); 973 struct cfg80211_ibss_params *params);
952int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 974int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
975void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
976void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
953 977
954/* scan/BSS handling */ 978/* scan/BSS handling */
955void ieee80211_scan_work(struct work_struct *work); 979void ieee80211_scan_work(struct work_struct *work);
@@ -960,6 +984,7 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
960int ieee80211_scan_results(struct ieee80211_local *local, 984int ieee80211_scan_results(struct ieee80211_local *local,
961 struct iw_request_info *info, 985 struct iw_request_info *info,
962 char *buf, size_t len); 986 char *buf, size_t len);
987void ieee80211_scan_cancel(struct ieee80211_local *local);
963ieee80211_rx_result 988ieee80211_rx_result
964ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, 989ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
965 struct sk_buff *skb, 990 struct sk_buff *skb,
@@ -1035,14 +1060,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1035void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1060void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1036 struct ieee80211_mgmt *mgmt, 1061 struct ieee80211_mgmt *mgmt,
1037 size_t len); 1062 size_t len);
1038void ieee80211_chswitch_timer(unsigned long data);
1039void ieee80211_chswitch_work(struct work_struct *work);
1040void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1041 struct ieee80211_channel_sw_ie *sw_elem,
1042 struct ieee80211_bss *bss);
1043void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1044 u16 capab_info, u8 *pwr_constr_elem,
1045 u8 pwr_constr_elem_len);
1046 1063
1047/* Suspend/resume and hw reconfiguration */ 1064/* Suspend/resume and hw reconfiguration */
1048int ieee80211_reconfig(struct ieee80211_local *local); 1065int ieee80211_reconfig(struct ieee80211_local *local);
@@ -1068,8 +1085,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1068 1085
1069/* utility functions/constants */ 1086/* utility functions/constants */
1070extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1087extern void *mac80211_wiphy_privid; /* for wiphy privid */
1071extern const unsigned char rfc1042_header[6];
1072extern const unsigned char bridge_tunnel_header[6];
1073u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 1088u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
1074 enum nl80211_iftype type); 1089 enum nl80211_iftype type);
1075int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1090int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 827ea8e6ee0a..ce267565e180 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -320,7 +320,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
320 case ALG_TKIP: 320 case ALG_TKIP:
321 key->conf.iv_len = TKIP_IV_LEN; 321 key->conf.iv_len = TKIP_IV_LEN;
322 key->conf.icv_len = TKIP_ICV_LEN; 322 key->conf.icv_len = TKIP_ICV_LEN;
323 if (seq && seq_len == 6) { 323 if (seq) {
324 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { 324 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
325 key->u.tkip.rx[i].iv32 = 325 key->u.tkip.rx[i].iv32 =
326 get_unaligned_le32(&seq[2]); 326 get_unaligned_le32(&seq[2]);
@@ -332,7 +332,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
332 case ALG_CCMP: 332 case ALG_CCMP:
333 key->conf.iv_len = CCMP_HDR_LEN; 333 key->conf.iv_len = CCMP_HDR_LEN;
334 key->conf.icv_len = CCMP_MIC_LEN; 334 key->conf.icv_len = CCMP_MIC_LEN;
335 if (seq && seq_len == CCMP_PN_LEN) { 335 if (seq) {
336 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 336 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
337 for (j = 0; j < CCMP_PN_LEN; j++) 337 for (j = 0; j < CCMP_PN_LEN; j++)
338 key->u.ccmp.rx_pn[i][j] = 338 key->u.ccmp.rx_pn[i][j] =
@@ -342,7 +342,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
342 case ALG_AES_CMAC: 342 case ALG_AES_CMAC:
343 key->conf.iv_len = 0; 343 key->conf.iv_len = 0;
344 key->conf.icv_len = sizeof(struct ieee80211_mmie); 344 key->conf.icv_len = sizeof(struct ieee80211_mmie);
345 if (seq && seq_len == 6) 345 if (seq)
346 for (j = 0; j < 6; j++) 346 for (j = 0; j < 6; j++)
347 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1]; 347 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
348 break; 348 break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 76df5eabf268..e37770ced53c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -219,18 +219,26 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
219 u32 changed) 219 u32 changed)
220{ 220{
221 struct ieee80211_local *local = sdata->local; 221 struct ieee80211_local *local = sdata->local;
222 static const u8 zero[ETH_ALEN] = { 0 };
222 223
223 if (!changed) 224 if (!changed)
224 return; 225 return;
225 226
226 if (sdata->vif.type == NL80211_IFTYPE_STATION) 227 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
227 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid; 228 /*
228 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 229 * While not associated, claim a BSSID of all-zeroes
230 * so that drivers don't do any weird things with the
231 * BSSID at that time.
232 */
233 if (sdata->vif.bss_conf.assoc)
234 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
235 else
236 sdata->vif.bss_conf.bssid = zero;
237 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
229 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 238 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
230 else if (sdata->vif.type == NL80211_IFTYPE_AP) 239 else if (sdata->vif.type == NL80211_IFTYPE_AP)
231 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr; 240 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr;
232 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 241 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
233 static const u8 zero[ETH_ALEN] = { 0 };
234 sdata->vif.bss_conf.bssid = zero; 242 sdata->vif.bss_conf.bssid = zero;
235 } else { 243 } else {
236 WARN_ON(1); 244 WARN_ON(1);
@@ -727,9 +735,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
727 * +-------------------------+ 735 * +-------------------------+
728 * 736 *
729 */ 737 */
730 priv_size = ((sizeof(struct ieee80211_local) + 738 priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
731 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
732 priv_data_len;
733 739
734 wiphy = wiphy_new(&mac80211_config_ops, priv_size); 740 wiphy = wiphy_new(&mac80211_config_ops, priv_size);
735 741
@@ -746,9 +752,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
746 752
747 local->hw.wiphy = wiphy; 753 local->hw.wiphy = wiphy;
748 754
749 local->hw.priv = (char *)local + 755 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
750 ((sizeof(struct ieee80211_local) +
751 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
752 756
753 BUG_ON(!ops->tx); 757 BUG_ON(!ops->tx);
754 BUG_ON(!ops->start); 758 BUG_ON(!ops->start);
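
The two ieee80211_alloc_hw() hunks above replace the open-coded (size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST arithmetic with the kernel's ALIGN() macro, which for a power-of-two alignment expands to essentially the same mask trick. A small user-space check of that equivalence (ALIGN_UP here is a local stand-in, not the kernel macro):

#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        size_t a = 32;          /* e.g. a NETDEV_ALIGN-style boundary */
        size_t x;

        /* mask-based round-up equals the division-based round-up
         * whenever 'a' is a power of two */
        for (x = 0; x <= 4 * a; x++)
                assert(ALIGN_UP(x, a) == ((x + a - 1) / a) * a);
        assert(ALIGN_UP(33, 32) == 64);
        assert(ALIGN_UP(32, 32) == 32);
        return 0;
}
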
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9000b01a1671..fc712e60705d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -21,6 +21,9 @@
21#define CAPAB_OFFSET 17 21#define CAPAB_OFFSET 17
22#define ACCEPT_PLINKS 0x80 22#define ACCEPT_PLINKS 0x80
23 23
24#define TMR_RUNNING_HK 0
25#define TMR_RUNNING_MP 1
26
24int mesh_allocated; 27int mesh_allocated;
25static struct kmem_cache *rm_cache; 28static struct kmem_cache *rm_cache;
26 29
@@ -45,6 +48,12 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
45 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 48 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
46 49
47 ifmsh->housekeeping = true; 50 ifmsh->housekeeping = true;
51
52 if (local->quiescing) {
53 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
54 return;
55 }
56
48 queue_work(local->hw.workqueue, &ifmsh->work); 57 queue_work(local->hw.workqueue, &ifmsh->work);
49} 58}
50 59
@@ -343,6 +352,11 @@ static void ieee80211_mesh_path_timer(unsigned long data)
343 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 352 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
344 struct ieee80211_local *local = sdata->local; 353 struct ieee80211_local *local = sdata->local;
345 354
355 if (local->quiescing) {
356 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
357 return;
358 }
359
346 queue_work(local->hw.workqueue, &ifmsh->work); 360 queue_work(local->hw.workqueue, &ifmsh->work);
347} 361}
348 362
@@ -424,6 +438,32 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
424 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 438 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
425} 439}
426 440
441#ifdef CONFIG_PM
442void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
443{
444 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
445
446 /* might restart the timer but that doesn't matter */
447 cancel_work_sync(&ifmsh->work);
448
449 /* use atomic bitops in case both timers fire at the same time */
450
451 if (del_timer_sync(&ifmsh->housekeeping_timer))
452 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
453 if (del_timer_sync(&ifmsh->mesh_path_timer))
454 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
455}
456
457void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
458{
459 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
460
461 if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
462 add_timer(&ifmsh->housekeeping_timer);
463 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
464 add_timer(&ifmsh->mesh_path_timer);
465}
466#endif
427 467
428void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) 468void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
429{ 469{
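
ieee80211_mesh_quiesce()/restart() above follow the same idea as the IBSS helpers earlier, but with one atomic bit per timer; as the comment in the hunk notes, the two timers and their callbacks may record their state concurrently. Reduced to a sketch with placeholder bit numbers:

#include <linux/timer.h>
#include <linux/bitops.h>

#define TMR_A   0               /* placeholder bit numbers */
#define TMR_B   1

static void two_timers_quiesce(struct timer_list *a, struct timer_list *b,
                               unsigned long *running)
{
        if (del_timer_sync(a))
                set_bit(TMR_A, running);
        if (del_timer_sync(b))
                set_bit(TMR_B, running);
}

static void two_timers_restart(struct timer_list *a, struct timer_list *b,
                               unsigned long *running)
{
        if (test_and_clear_bit(TMR_A, running))
                add_timer(a);
        if (test_and_clear_bit(TMR_B, running))
                add_timer(b);
}
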
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index d891d7ddccd7..c7d72819cdd2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -191,12 +191,8 @@ struct mesh_rmc {
191#define PLINK_CATEGORY 30 191#define PLINK_CATEGORY 30
192#define MESH_PATH_SEL_CATEGORY 32 192#define MESH_PATH_SEL_CATEGORY 32
193 193
194/* Mesh Header Flags */
195#define IEEE80211S_FLAGS_AE 0x3
196
197/* Public interfaces */ 194/* Public interfaces */
198/* Various */ 195/* Various */
199int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
200int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 196int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
201 struct ieee80211_sub_if_data *sdata); 197 struct ieee80211_sub_if_data *sdata);
202int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 198int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
@@ -267,6 +263,8 @@ void mesh_path_timer(unsigned long data);
267void mesh_path_flush_by_nexthop(struct sta_info *sta); 263void mesh_path_flush_by_nexthop(struct sta_info *sta);
268void mesh_path_discard_frame(struct sk_buff *skb, 264void mesh_path_discard_frame(struct sk_buff *skb,
269 struct ieee80211_sub_if_data *sdata); 265 struct ieee80211_sub_if_data *sdata);
266void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
267void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
270 268
271#ifdef CONFIG_MAC80211_MESH 269#ifdef CONFIG_MAC80211_MESH
272extern int mesh_allocated; 270extern int mesh_allocated;
@@ -294,10 +292,20 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
294 292
295void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 293void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
296 294
295void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
296void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
297void mesh_plink_quiesce(struct sta_info *sta);
298void mesh_plink_restart(struct sta_info *sta);
297#else 299#else
298#define mesh_allocated 0 300#define mesh_allocated 0
299static inline void 301static inline void
300ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} 302ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
303static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
304{}
305static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
306{}
307static inline void mesh_plink_quiesce(struct sta_info *sta) {}
308static inline void mesh_plink_restart(struct sta_info *sta) {}
301#endif 309#endif
302 310
303#endif /* IEEE80211S_H */ 311#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 60b35accda91..003cb470ac84 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -836,8 +836,14 @@ void mesh_path_timer(unsigned long data)
836 mpath = rcu_dereference(mpath); 836 mpath = rcu_dereference(mpath);
837 if (!mpath) 837 if (!mpath)
838 goto endmpathtimer; 838 goto endmpathtimer;
839 spin_lock_bh(&mpath->state_lock);
840 sdata = mpath->sdata; 839 sdata = mpath->sdata;
840
841 if (sdata->local->quiescing) {
842 rcu_read_unlock();
843 return;
844 }
845
846 spin_lock_bh(&mpath->state_lock);
841 if (mpath->flags & MESH_PATH_RESOLVED || 847 if (mpath->flags & MESH_PATH_RESOLVED ||
842 (!(mpath->flags & MESH_PATH_RESOLVING))) 848 (!(mpath->flags & MESH_PATH_RESOLVING)))
843 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 849 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a8bbdeca013a..cb14253587f1 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -266,6 +266,11 @@ static void mesh_plink_timer(unsigned long data)
266 */ 266 */
267 sta = (struct sta_info *) data; 267 sta = (struct sta_info *) data;
268 268
269 if (sta->sdata->local->quiescing) {
270 sta->plink_timer_was_running = true;
271 return;
272 }
273
269 spin_lock_bh(&sta->lock); 274 spin_lock_bh(&sta->lock);
270 if (sta->ignore_plink_timer) { 275 if (sta->ignore_plink_timer) {
271 sta->ignore_plink_timer = false; 276 sta->ignore_plink_timer = false;
@@ -322,6 +327,22 @@ static void mesh_plink_timer(unsigned long data)
322 } 327 }
323} 328}
324 329
330#ifdef CONFIG_PM
331void mesh_plink_quiesce(struct sta_info *sta)
332{
333 if (del_timer_sync(&sta->plink_timer))
334 sta->plink_timer_was_running = true;
335}
336
337void mesh_plink_restart(struct sta_info *sta)
338{
339 if (sta->plink_timer_was_running) {
340 add_timer(&sta->plink_timer);
341 sta->plink_timer_was_running = false;
342 }
343}
344#endif
345
325static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) 346static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
326{ 347{
327 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); 348 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae030688771f..509469cb9265 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -33,10 +33,13 @@
33#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 33#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
34#define IEEE80211_ASSOC_MAX_TRIES 3 34#define IEEE80211_ASSOC_MAX_TRIES 3
35#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 35#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
36#define IEEE80211_PROBE_WAIT (HZ / 20) 36#define IEEE80211_PROBE_WAIT (HZ / 5)
37#define IEEE80211_PROBE_IDLE_TIME (60 * HZ) 37#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
39 39
40#define TMR_RUNNING_TIMER 0
41#define TMR_RUNNING_CHANSW 1
42
40/* utils */ 43/* utils */
41static int ecw2cw(int ecw) 44static int ecw2cw(int ecw)
42{ 45{
@@ -121,10 +124,14 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
121 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { 124 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
122 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 125 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
123 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 126 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
124 channel_type = NL80211_CHAN_HT40PLUS; 127 if (!(local->hw.conf.channel->flags &
128 IEEE80211_CHAN_NO_HT40PLUS))
129 channel_type = NL80211_CHAN_HT40PLUS;
125 break; 130 break;
126 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 131 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
127 channel_type = NL80211_CHAN_HT40MINUS; 132 if (!(local->hw.conf.channel->flags &
133 IEEE80211_CHAN_NO_HT40MINUS))
134 channel_type = NL80211_CHAN_HT40MINUS;
128 break; 135 break;
129 } 136 }
130 } 137 }
@@ -349,13 +356,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
349 356
350 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 357 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
351 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 358 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
352 if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) { 359 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
353 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 360 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
354 cap &= ~IEEE80211_HT_CAP_SGI_40; 361 cap &= ~IEEE80211_HT_CAP_SGI_40;
355 } 362 }
356 break; 363 break;
357 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 364 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
358 if (flags & IEEE80211_CHAN_NO_FAT_BELOW) { 365 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
359 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 366 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
360 cap &= ~IEEE80211_HT_CAP_SGI_40; 367 cap &= ~IEEE80211_HT_CAP_SGI_40;
361 } 368 }
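Both hunks above key HT40 decisions off the per-channel regulatory flags that replace the old NO_FAT_ABOVE/BELOW names: IEEE80211_CHAN_NO_HT40PLUS forbids pairing the channel with the extension channel above it, IEEE80211_CHAN_NO_HT40MINUS with the one below. A condensed sketch of the association-time selection, assuming the usual mac80211/nl80211 headers; falling back to HT20 is an assumption of this sketch, not something stated in the hunk:

static enum nl80211_channel_type pick_ht_type(struct ieee80211_channel *chan,
                                              struct ieee80211_ht_info *hti)
{
        enum nl80211_channel_type ct = NL80211_CHAN_HT20;

        switch (hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                if (!(chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
                        ct = NL80211_CHAN_HT40PLUS;     /* else stay 20 MHz wide */
                break;
        case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
                if (!(chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
                        ct = NL80211_CHAN_HT40MINUS;
                break;
        }
        return ct;
}

The second hunk applies the same flags in the other direction: when the extension channel is disallowed, the 40 MHz and SGI-40 capability bits are stripped from the association request.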
@@ -482,6 +489,108 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
482 ieee80211_tx_skb(sdata, skb, 0); 489 ieee80211_tx_skb(sdata, skb, 0);
483} 490}
484 491
492/* spectrum management related things */
493static void ieee80211_chswitch_work(struct work_struct *work)
494{
495 struct ieee80211_sub_if_data *sdata =
496 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
497 struct ieee80211_bss *bss;
498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
499
500 if (!netif_running(sdata->dev))
501 return;
502
503 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
504 sdata->local->hw.conf.channel->center_freq,
505 ifmgd->ssid, ifmgd->ssid_len);
506 if (!bss)
507 goto exit;
508
509 sdata->local->oper_channel = sdata->local->csa_channel;
510 /* XXX: shouldn't really modify cfg80211-owned data! */
511 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
512 bss->cbss.channel = sdata->local->oper_channel;
513
514 ieee80211_rx_bss_put(sdata->local, bss);
515exit:
516 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
517 ieee80211_wake_queues_by_reason(&sdata->local->hw,
518 IEEE80211_QUEUE_STOP_REASON_CSA);
519}
520
521static void ieee80211_chswitch_timer(unsigned long data)
522{
523 struct ieee80211_sub_if_data *sdata =
524 (struct ieee80211_sub_if_data *) data;
525 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
526
527 if (sdata->local->quiescing) {
528 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
529 return;
530 }
531
532 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
533}
534
535void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
536 struct ieee80211_channel_sw_ie *sw_elem,
537 struct ieee80211_bss *bss)
538{
539 struct ieee80211_channel *new_ch;
540 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
541 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
542
543 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
544 return;
545
546 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
547 return;
548
549 /* Disregard subsequent beacons if we are already running a timer
550 processing a CSA */
551
552 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
553 return;
554
555 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
556 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
557 return;
558
559 sdata->local->csa_channel = new_ch;
560
561 if (sw_elem->count <= 1) {
562 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
563 } else {
564 ieee80211_stop_queues_by_reason(&sdata->local->hw,
565 IEEE80211_QUEUE_STOP_REASON_CSA);
566 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
567 mod_timer(&ifmgd->chswitch_timer,
568 jiffies +
569 msecs_to_jiffies(sw_elem->count *
570 bss->cbss.beacon_interval));
571 }
572}
573
574static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
575 u16 capab_info, u8 *pwr_constr_elem,
576 u8 pwr_constr_elem_len)
577{
578 struct ieee80211_conf *conf = &sdata->local->hw.conf;
579
580 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
581 return;
582
583 /* Power constraint IE length should be 1 octet */
584 if (pwr_constr_elem_len != 1)
585 return;
586
587 if ((*pwr_constr_elem <= conf->channel->max_power) &&
588 (*pwr_constr_elem != sdata->local->power_constr_level)) {
589 sdata->local->power_constr_level = *pwr_constr_elem;
590 ieee80211_hw_config(sdata->local, 0);
591 }
592}
593
485/* powersave */ 594/* powersave */
486static void ieee80211_enable_ps(struct ieee80211_local *local, 595static void ieee80211_enable_ps(struct ieee80211_local *local,
487 struct ieee80211_sub_if_data *sdata) 596 struct ieee80211_sub_if_data *sdata)
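The channel-switch handling moved in from spectmgmt.c follows a simple scheme: validate the target channel, then either run the switch work immediately (count <= 1) or stop the TX queues, set IEEE80211_STA_CSA_RECEIVED and arm chswitch_timer for roughly count beacon intervals. One detail worth a note is the unit mismatch in the deadline arithmetic; a hedged sketch of just that calculation:

#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * Sketch: turn a CSA count (beacons until the switch) into a timer deadline.
 * The beacon interval is carried in TUs of 1.024 ms; feeding it straight
 * into msecs_to_jiffies(), as the code above does, makes the timer fire
 * about 2.4% early, which errs on the safe side since TX is already stopped.
 */
static unsigned long csa_deadline(u8 count, u16 beacon_int_tu)
{
        return jiffies + msecs_to_jiffies((unsigned long)count * beacon_int_tu);
}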
@@ -613,6 +722,9 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
613{ 722{
614 struct ieee80211_local *local = (void *) data; 723 struct ieee80211_local *local = (void *) data;
615 724
725 if (local->quiescing)
726 return;
727
616 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work); 728 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
617} 729}
618 730
@@ -865,6 +977,10 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
865 * changed or not. 977 * changed or not.
866 */ 978 */
867 bss_info_changed |= BSS_CHANGED_BASIC_RATES; 979 bss_info_changed |= BSS_CHANGED_BASIC_RATES;
980
981 /* And the BSSID changed - we're associated now */
982 bss_info_changed |= BSS_CHANGED_BSSID;
983
868 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 984 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
869 985
870 /* will be same as sdata */ 986 /* will be same as sdata */
@@ -1064,6 +1180,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1064 } 1180 }
1065 1181
1066 ieee80211_hw_config(local, config_changed); 1182 ieee80211_hw_config(local, config_changed);
1183
1184 /* And the BSSID changed -- not very interesting here */
1185 changed |= BSS_CHANGED_BSSID;
1067 ieee80211_bss_info_change_notify(sdata, changed); 1186 ieee80211_bss_info_change_notify(sdata, changed);
1068 1187
1069 rcu_read_lock(); 1188 rcu_read_lock();
@@ -1270,8 +1389,8 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1270 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1389 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1271 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1390 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1272 ifmgd->ssid_len, NULL, 0); 1391 ifmgd->ssid_len, NULL, 0);
1392 mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
1273 goto unlock; 1393 goto unlock;
1274
1275 } 1394 }
1276 1395
1277 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { 1396 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) {
@@ -1280,15 +1399,16 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1280 ifmgd->ssid_len, NULL, 0); 1399 ifmgd->ssid_len, NULL, 0);
1281 } 1400 }
1282 1401
1402 if (!disassoc)
1403 mod_timer(&ifmgd->timer,
1404 jiffies + IEEE80211_MONITORING_INTERVAL);
1405
1283 unlock: 1406 unlock:
1284 rcu_read_unlock(); 1407 rcu_read_unlock();
1285 1408
1286 if (disassoc) 1409 if (disassoc)
1287 ieee80211_set_disassoc(sdata, true, true, 1410 ieee80211_set_disassoc(sdata, true, true,
1288 WLAN_REASON_PREV_AUTH_NOT_VALID); 1411 WLAN_REASON_PREV_AUTH_NOT_VALID);
1289 else
1290 mod_timer(&ifmgd->timer, jiffies +
1291 IEEE80211_MONITORING_INTERVAL);
1292} 1412}
1293 1413
1294 1414
@@ -1732,7 +1852,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1732 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) { 1852 (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) {
1733 struct ieee80211_channel_sw_ie *sw_elem = 1853 struct ieee80211_channel_sw_ie *sw_elem =
1734 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1854 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
1735 ieee80211_process_chanswitch(sdata, sw_elem, bss); 1855 ieee80211_sta_process_chanswitch(sdata, sw_elem, bss);
1736 } 1856 }
1737 1857
1738 ieee80211_rx_bss_put(local, bss); 1858 ieee80211_rx_bss_put(local, bss);
@@ -1820,6 +1940,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1820 memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0) 1940 memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0)
1821 return; 1941 return;
1822 1942
1943 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
1944#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1945 if (net_ratelimit()) {
1946 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1947 "to a received beacon\n", sdata->dev->name);
1948 }
1949#endif
1950 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1951 }
1952
1823 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); 1953 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
1824 ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, 1954 ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
1825 len - baselen, &elems, 1955 len - baselen, &elems,
@@ -1829,16 +1959,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1829 directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len, 1959 directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
1830 ifmgd->aid); 1960 ifmgd->aid);
1831 1961
1832 ncrc = crc32_be(ncrc, (void *)&directed_tim, sizeof(directed_tim)); 1962 if (ncrc != ifmgd->beacon_crc) {
1963 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
1964 true);
1833 1965
1834 if (ncrc == ifmgd->beacon_crc) 1966 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1835 return; 1967 elems.wmm_param_len);
1836 ifmgd->beacon_crc = ncrc; 1968 }
1837
1838 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
1839
1840 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1841 elems.wmm_param_len);
1842 1969
1843 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { 1970 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
1844 if (directed_tim) { 1971 if (directed_tim) {
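The beacon handler above avoids redoing work on every beacon by comparing a CRC of the interesting parts of the frame against the last value seen; after this change the directed-TIM bit is handled on every beacon, while the CRC only gates the BSS-info and WMM updates (and, further down, the ERP/HT reconfiguration). A toy version of the change-detection idea using plain crc32_be(), with made-up buffer handling:

#include <linux/crc32.h>
#include <linux/types.h>

/* Sketch: only re-process a periodic frame when its relevant bytes changed. */
static bool ies_changed(const u8 *ies, size_t len, u32 *cached_crc)
{
        u32 crc = crc32_be(0, ies, len);

        if (crc == *cached_crc)
                return false;   /* identical to last time, skip the heavy work */
        *cached_crc = crc;
        return true;
}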
@@ -1863,6 +1990,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1863 } 1990 }
1864 } 1991 }
1865 1992
1993 if (ncrc == ifmgd->beacon_crc)
1994 return;
1995 ifmgd->beacon_crc = ncrc;
1996
1866 if (elems.erp_info && elems.erp_info_len >= 1) { 1997 if (elems.erp_info && elems.erp_info_len >= 1) {
1867 erp_valid = true; 1998 erp_valid = true;
1868 erp_value = elems.erp_info[0]; 1999 erp_value = elems.erp_info[0];
@@ -1997,6 +2128,11 @@ static void ieee80211_sta_timer(unsigned long data)
1997 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2128 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1998 struct ieee80211_local *local = sdata->local; 2129 struct ieee80211_local *local = sdata->local;
1999 2130
2131 if (local->quiescing) {
2132 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
2133 return;
2134 }
2135
2000 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); 2136 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
2001 queue_work(local->hw.workqueue, &ifmgd->work); 2137 queue_work(local->hw.workqueue, &ifmgd->work);
2002} 2138}
@@ -2129,6 +2265,17 @@ static void ieee80211_sta_work(struct work_struct *work)
2129 2265
2130 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 2266 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
2131 return; 2267 return;
2268
2269 /*
2270 * Nothing should have been stuffed into the workqueue during
2271 * the suspend->resume cycle. If this WARN is seen then there
2272 * is a bug with either the driver suspend or something in
2273 * mac80211 stuffing into the workqueue which we haven't yet
2274 * cleared during mac80211's suspend cycle.
2275 */
2276 if (WARN_ON(local->suspended))
2277 return;
2278
2132 ifmgd = &sdata->u.mgd; 2279 ifmgd = &sdata->u.mgd;
2133 2280
2134 while ((skb = skb_dequeue(&ifmgd->skb_queue))) 2281 while ((skb = skb_dequeue(&ifmgd->skb_queue)))
@@ -2196,6 +2343,38 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
2196 } 2343 }
2197} 2344}
2198 2345
2346#ifdef CONFIG_PM
2347void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2348{
2349 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2350
2351 /*
2352 * we need to use atomic bitops for the running bits
2353 * only because both timers might fire at the same
2354 * time -- the code here is properly synchronised.
2355 */
2356
2357 cancel_work_sync(&ifmgd->work);
2358 cancel_work_sync(&ifmgd->beacon_loss_work);
2359 if (del_timer_sync(&ifmgd->timer))
2360 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
2361
2362 cancel_work_sync(&ifmgd->chswitch_work);
2363 if (del_timer_sync(&ifmgd->chswitch_timer))
2364 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
2365}
2366
2367void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2368{
2369 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2370
2371 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
2372 add_timer(&ifmgd->timer);
2373 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
2374 add_timer(&ifmgd->chswitch_timer);
2375}
2376#endif
2377
2199/* interface setup */ 2378/* interface setup */
2200void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) 2379void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2201{ 2380{
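ieee80211_sta_quiesce()/restart() above use one unsigned long of atomic flag bits (timers_running) rather than two bools because, as the comment says, both timers can fire and try to record themselves at the same time. A stripped-down sketch of that pattern with two generic timers (names are illustrative):

#include <linux/bitops.h>
#include <linux/timer.h>

#define TMR_A   0
#define TMR_B   1

static unsigned long timers_running;    /* shared, hence atomic bitops */

static void timers_quiesce(struct timer_list *a, struct timer_list *b)
{
        if (del_timer_sync(a))
                set_bit(TMR_A, &timers_running);
        if (del_timer_sync(b))
                set_bit(TMR_B, &timers_running);
}

static void timers_restart(struct timer_list *a, struct timer_list *b)
{
        if (test_and_clear_bit(TMR_A, &timers_running))
                add_timer(a);
        if (test_and_clear_bit(TMR_B, &timers_running))
                add_timer(b);
}

In the real code the bits are also set from the timer callbacks themselves when local->quiescing is observed, which is the concurrent writer the atomic ops are there for.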
@@ -2310,9 +2489,6 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
2310 ifmgd->flags &= ~IEEE80211_STA_BSSID_SET; 2489 ifmgd->flags &= ~IEEE80211_STA_BSSID_SET;
2311 } 2490 }
2312 2491
2313 if (netif_running(sdata->dev))
2314 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
2315
2316 return ieee80211_sta_commit(sdata); 2492 return ieee80211_sta_commit(sdata);
2317} 2493}
2318 2494
@@ -2321,6 +2497,13 @@ int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
2321{ 2497{
2322 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2323 2499
2500 if (len == 0 && ifmgd->extra_ie_len == 0)
2501 return -EALREADY;
2502
2503 if (len == ifmgd->extra_ie_len && ifmgd->extra_ie &&
2504 memcmp(ifmgd->extra_ie, ie, len) == 0)
2505 return -EALREADY;
2506
2324 kfree(ifmgd->extra_ie); 2507 kfree(ifmgd->extra_ie);
2325 if (len == 0) { 2508 if (len == 0) {
2326 ifmgd->extra_ie = NULL; 2509 ifmgd->extra_ie = NULL;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 9d3d89abbb57..7a549f9deb96 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -2,6 +2,7 @@
2#include <net/rtnetlink.h> 2#include <net/rtnetlink.h>
3 3
4#include "ieee80211_i.h" 4#include "ieee80211_i.h"
5#include "mesh.h"
5#include "driver-ops.h" 6#include "driver-ops.h"
6#include "led.h" 7#include "led.h"
7 8
@@ -13,11 +14,30 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
13 struct sta_info *sta; 14 struct sta_info *sta;
14 unsigned long flags; 15 unsigned long flags;
15 16
17 ieee80211_scan_cancel(local);
18
16 ieee80211_stop_queues_by_reason(hw, 19 ieee80211_stop_queues_by_reason(hw,
17 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 20 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
18 21
22 /* flush out all packets */
23 synchronize_net();
24
25 local->quiescing = true;
26 /* make quiescing visible to timers everywhere */
27 mb();
28
19 flush_workqueue(local->hw.workqueue); 29 flush_workqueue(local->hw.workqueue);
20 30
31 /* Don't try to run timers while suspended. */
32 del_timer_sync(&local->sta_cleanup);
33
34 /*
35 * Note that this particular timer doesn't need to be
36 * restarted at resume.
37 */
38 cancel_work_sync(&local->dynamic_ps_enable_work);
39 del_timer_sync(&local->dynamic_ps_timer);
40
21 /* disable keys */ 41 /* disable keys */
22 list_for_each_entry(sdata, &local->interfaces, list) 42 list_for_each_entry(sdata, &local->interfaces, list)
23 ieee80211_disable_keys(sdata); 43 ieee80211_disable_keys(sdata);
@@ -35,10 +55,20 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
35 55
36 rcu_read_unlock(); 56 rcu_read_unlock();
37 57
58 /* flush again, in case driver queued work */
59 flush_workqueue(local->hw.workqueue);
60
61 /* stop hardware - this must stop RX */
62 if (local->open_count) {
63 ieee80211_led_radio(local, false);
64 drv_stop(local);
65 }
66
38 /* remove STAs */ 67 /* remove STAs */
39 if (local->ops->sta_notify) { 68 spin_lock_irqsave(&local->sta_lock, flags);
40 spin_lock_irqsave(&local->sta_lock, flags); 69 list_for_each_entry(sta, &local->sta_list, list) {
41 list_for_each_entry(sta, &local->sta_list, list) { 70 if (local->ops->sta_notify) {
71 sdata = sta->sdata;
42 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 72 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
43 sdata = container_of(sdata->bss, 73 sdata = container_of(sdata->bss,
44 struct ieee80211_sub_if_data, 74 struct ieee80211_sub_if_data,
@@ -47,29 +77,43 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
47 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 77 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
48 &sta->sta); 78 &sta->sta);
49 } 79 }
50 spin_unlock_irqrestore(&local->sta_lock, flags); 80
81 mesh_plink_quiesce(sta);
51 } 82 }
83 spin_unlock_irqrestore(&local->sta_lock, flags);
52 84
53 /* remove all interfaces */ 85 /* remove all interfaces */
54 list_for_each_entry(sdata, &local->interfaces, list) { 86 list_for_each_entry(sdata, &local->interfaces, list) {
55 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 87 switch(sdata->vif.type) {
56 sdata->vif.type != NL80211_IFTYPE_MONITOR && 88 case NL80211_IFTYPE_STATION:
57 netif_running(sdata->dev)) { 89 ieee80211_sta_quiesce(sdata);
58 conf.vif = &sdata->vif; 90 break;
59 conf.type = sdata->vif.type; 91 case NL80211_IFTYPE_ADHOC:
60 conf.mac_addr = sdata->dev->dev_addr; 92 ieee80211_ibss_quiesce(sdata);
61 drv_remove_interface(local, &conf); 93 break;
94 case NL80211_IFTYPE_MESH_POINT:
95 ieee80211_mesh_quiesce(sdata);
96 break;
97 case NL80211_IFTYPE_AP_VLAN:
98 case NL80211_IFTYPE_MONITOR:
99 /* don't tell driver about this */
100 continue;
101 default:
102 break;
62 } 103 }
63 }
64 104
65 /* flush again, in case driver queued work */ 105 if (!netif_running(sdata->dev))
66 flush_workqueue(local->hw.workqueue); 106 continue;
67 107
68 /* stop hardware */ 108 conf.vif = &sdata->vif;
69 if (local->open_count) { 109 conf.type = sdata->vif.type;
70 ieee80211_led_radio(local, false); 110 conf.mac_addr = sdata->dev->dev_addr;
71 drv_stop(local); 111 drv_remove_interface(local, &conf);
72 } 112 }
113
114 local->suspended = true;
115 local->quiescing = false;
116
73 return 0; 117 return 0;
74} 118}
75 119
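The suspend path above raises local->quiescing before flushing the workqueue, with an mb() so timer callbacks on other CPUs see the flag and park themselves (or simply bail out) instead of queueing new work; only then are the timers deleted and the hardware stopped. A minimal sketch of that handshake with made-up names, keeping the old unsigned-long timer-callback signature used throughout this patch and assuming the usual kernel headers for the barrier:

#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static bool quiescing;
static struct work_struct my_work;
static struct workqueue_struct *my_wq;

static void my_timer_fn(unsigned long data)
{
        if (quiescing)
                return;         /* suspend in progress: do not requeue work */
        queue_work(my_wq, &my_work);
}

static void my_suspend(void)
{
        quiescing = true;
        mb();                   /* make the flag visible before flushing */
        flush_workqueue(my_wq);
        /* now del_timer_sync() the timers and stop the hardware */
}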
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f962bd1b16e2..6a9b8e63a6bf 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1247,93 +1247,12 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1247} 1247}
1248 1248
1249static int 1249static int
1250ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1250__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1251{ 1251{
1252 struct net_device *dev = rx->dev; 1252 struct net_device *dev = rx->dev;
1253 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1254 u16 hdrlen, ethertype;
1255 u8 *payload;
1256 u8 dst[ETH_ALEN];
1257 u8 src[ETH_ALEN] __aligned(2);
1258 struct sk_buff *skb = rx->skb;
1259 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1253 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1260 1254
1261 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1255 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1262 return -1;
1263
1264 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1265
1266 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1267 * header
1268 * IEEE 802.11 address fields:
1269 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1270 * 0 0 DA SA BSSID n/a
1271 * 0 1 DA BSSID SA n/a
1272 * 1 0 BSSID SA DA n/a
1273 * 1 1 RA TA DA SA
1274 */
1275 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1276 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1277
1278 switch (hdr->frame_control &
1279 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1280 case cpu_to_le16(IEEE80211_FCTL_TODS):
1281 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1282 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1283 return -1;
1284 break;
1285 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1286 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1287 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1288 return -1;
1289 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1290 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1291 (skb->data + hdrlen);
1292 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1293 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1294 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1295 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1296 }
1297 }
1298 break;
1299 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1300 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1301 (is_multicast_ether_addr(dst) &&
1302 !compare_ether_addr(src, dev->dev_addr)))
1303 return -1;
1304 break;
1305 case cpu_to_le16(0):
1306 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1307 return -1;
1308 break;
1309 }
1310
1311 if (unlikely(skb->len - hdrlen < 8))
1312 return -1;
1313
1314 payload = skb->data + hdrlen;
1315 ethertype = (payload[6] << 8) | payload[7];
1316
1317 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1318 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1319 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1320 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1321 * replace EtherType */
1322 skb_pull(skb, hdrlen + 6);
1323 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1324 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1325 } else {
1326 struct ethhdr *ehdr;
1327 __be16 len;
1328
1329 skb_pull(skb, hdrlen);
1330 len = htons(skb->len);
1331 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1332 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1333 memcpy(ehdr->h_source, src, ETH_ALEN);
1334 ehdr->h_proto = len;
1335 }
1336 return 0;
1337} 1256}
1338 1257
1339/* 1258/*
@@ -1472,7 +1391,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1472 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1391 if (!(rx->flags & IEEE80211_RX_AMSDU))
1473 return RX_CONTINUE; 1392 return RX_CONTINUE;
1474 1393
1475 err = ieee80211_data_to_8023(rx); 1394 err = __ieee80211_data_to_8023(rx);
1476 if (unlikely(err)) 1395 if (unlikely(err))
1477 return RX_DROP_UNUSABLE; 1396 return RX_DROP_UNUSABLE;
1478 1397
@@ -1658,7 +1577,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1658 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1577 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1659 return RX_DROP_MONITOR; 1578 return RX_DROP_MONITOR;
1660 1579
1661 err = ieee80211_data_to_8023(rx); 1580 err = __ieee80211_data_to_8023(rx);
1662 if (unlikely(err)) 1581 if (unlikely(err))
1663 return RX_DROP_UNUSABLE; 1582 return RX_DROP_UNUSABLE;
1664 1583
@@ -1846,6 +1765,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1846 sizeof(mgmt->u.action.u.chan_switch))) 1765 sizeof(mgmt->u.action.u.chan_switch)))
1847 return RX_DROP_MONITOR; 1766 return RX_DROP_MONITOR;
1848 1767
1768 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1769 return RX_DROP_MONITOR;
1770
1849 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 1771 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1850 return RX_DROP_MONITOR; 1772 return RX_DROP_MONITOR;
1851 1773
@@ -1856,7 +1778,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1856 if (!bss) 1778 if (!bss)
1857 return RX_DROP_MONITOR; 1779 return RX_DROP_MONITOR;
1858 1780
1859 ieee80211_process_chanswitch(sdata, 1781 ieee80211_sta_process_chanswitch(sdata,
1860 &mgmt->u.action.u.chan_switch.sw_elem, bss); 1782 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1861 ieee80211_rx_bss_put(local, bss); 1783 ieee80211_rx_bss_put(local, bss);
1862 break; 1784 break;
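The two RX handlers above now defer the 802.11-to-802.3 conversion to a cfg80211 helper; going by the call site, it takes the skb, the interface's own MAC address and the nl80211 interface type, and returns 0 once the skb has been rewritten as an Ethernet frame. A hedged usage sketch built on that assumed signature (the delivery step here is generic illustration, not what mac80211 itself does afterwards):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <net/cfg80211.h>

static int deliver_as_8023(struct sk_buff *skb, struct net_device *dev,
                           enum nl80211_iftype iftype)
{
        int err = ieee80211_data_to_8023(skb, dev->dev_addr, iftype);

        if (err)
                return err;     /* not a deliverable data frame for this iftype */

        skb->protocol = eth_type_trans(skb, dev);
        return netif_rx(skb);
}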
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e65d74ba404b..2a8d09ad17ff 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -631,3 +631,21 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
631 mutex_unlock(&local->scan_mtx); 631 mutex_unlock(&local->scan_mtx);
632 return ret; 632 return ret;
633} 633}
634
635void ieee80211_scan_cancel(struct ieee80211_local *local)
636{
637 bool swscan;
638
639 cancel_delayed_work_sync(&local->scan_work);
640
641 /*
642 * Only call this function when a scan can't be
643 * queued -- mostly at suspend under RTNL.
644 */
645 mutex_lock(&local->scan_mtx);
646 swscan = local->sw_scanning;
647 mutex_unlock(&local->scan_mtx);
648
649 if (swscan)
650 ieee80211_scan_completed(&local->hw, true);
651}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 48bf78e7fa7a..68953033403d 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -84,104 +84,3 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
84 mgmt->sa, mgmt->bssid, 84 mgmt->sa, mgmt->bssid,
85 mgmt->u.action.u.measurement.dialog_token); 85 mgmt->u.action.u.measurement.dialog_token);
86} 86}
87
88void ieee80211_chswitch_work(struct work_struct *work)
89{
90 struct ieee80211_sub_if_data *sdata =
91 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
92 struct ieee80211_bss *bss;
93 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
94
95 if (!netif_running(sdata->dev))
96 return;
97
98 bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
99 sdata->local->hw.conf.channel->center_freq,
100 ifmgd->ssid, ifmgd->ssid_len);
101 if (!bss)
102 goto exit;
103
104 sdata->local->oper_channel = sdata->local->csa_channel;
105 /* XXX: shouldn't really modify cfg80211-owned data! */
106 if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
107 bss->cbss.channel = sdata->local->oper_channel;
108
109 ieee80211_rx_bss_put(sdata->local, bss);
110exit:
111 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
112 ieee80211_wake_queues_by_reason(&sdata->local->hw,
113 IEEE80211_QUEUE_STOP_REASON_CSA);
114}
115
116void ieee80211_chswitch_timer(unsigned long data)
117{
118 struct ieee80211_sub_if_data *sdata =
119 (struct ieee80211_sub_if_data *) data;
120 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
121
122 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
123}
124
125void ieee80211_process_chanswitch(struct ieee80211_sub_if_data *sdata,
126 struct ieee80211_channel_sw_ie *sw_elem,
127 struct ieee80211_bss *bss)
128{
129 struct ieee80211_channel *new_ch;
130 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
131 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
132
133 /* FIXME: Handle ADHOC later */
134 if (sdata->vif.type != NL80211_IFTYPE_STATION)
135 return;
136
137 if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
138 return;
139
140 if (sdata->local->sw_scanning || sdata->local->hw_scanning)
141 return;
142
143 /* Disregard subsequent beacons if we are already running a timer
144 processing a CSA */
145
146 if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
147 return;
148
149 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
150 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
151 return;
152
153 sdata->local->csa_channel = new_ch;
154
155 if (sw_elem->count <= 1) {
156 queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
157 } else {
158 ieee80211_stop_queues_by_reason(&sdata->local->hw,
159 IEEE80211_QUEUE_STOP_REASON_CSA);
160 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
161 mod_timer(&ifmgd->chswitch_timer,
162 jiffies +
163 msecs_to_jiffies(sw_elem->count *
164 bss->cbss.beacon_interval));
165 }
166}
167
168void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
169 u16 capab_info, u8 *pwr_constr_elem,
170 u8 pwr_constr_elem_len)
171{
172 struct ieee80211_conf *conf = &sdata->local->hw.conf;
173
174 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
175 return;
176
177 /* Power constraint IE length should be 1 octet */
178 if (pwr_constr_elem_len != 1)
179 return;
180
181 if ((*pwr_constr_elem <= conf->channel->max_power) &&
182 (*pwr_constr_elem != sdata->local->power_constr_level)) {
183 sdata->local->power_constr_level = *pwr_constr_elem;
184 ieee80211_hw_config(sdata->local, 0);
185 }
186}
187
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a98ea273a155..d5611d8fd0d6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -293,6 +293,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
293 skb_queue_head_init(&sta->ps_tx_buf); 293 skb_queue_head_init(&sta->ps_tx_buf);
294 skb_queue_head_init(&sta->tx_filtered); 294 skb_queue_head_init(&sta->tx_filtered);
295 295
296 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
297 sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
298
296#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
297 printk(KERN_DEBUG "%s: Allocated STA %pM\n", 300 printk(KERN_DEBUG "%s: Allocated STA %pM\n",
298 wiphy_name(local->hw.wiphy), sta->sta.addr); 301 wiphy_name(local->hw.wiphy), sta->sta.addr);
@@ -608,6 +611,9 @@ static void sta_info_cleanup(unsigned long data)
608 sta_info_cleanup_expire_buffered(local, sta); 611 sta_info_cleanup_expire_buffered(local, sta);
609 rcu_read_unlock(); 612 rcu_read_unlock();
610 613
614 if (local->quiescing)
615 return;
616
611 local->sta_cleanup.expires = 617 local->sta_cleanup.expires =
612 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 618 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
613 add_timer(&local->sta_cleanup); 619 add_timer(&local->sta_cleanup);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 164b16cbe0a5..49a1a1f76511 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -216,6 +216,7 @@ struct sta_ampdu_mlme {
216 * @plink_state: peer link state 216 * @plink_state: peer link state
217 * @plink_timeout: timeout of peer link 217 * @plink_timeout: timeout of peer link
218 * @plink_timer: peer link watch timer 218 * @plink_timer: peer link watch timer
219 * @plink_timer_was_running: used by suspend/resume to restore timers
219 * @debugfs: debug filesystem info 220 * @debugfs: debug filesystem info
220 * @sta: station information we share with the driver 221 * @sta: station information we share with the driver
221 */ 222 */
@@ -293,6 +294,7 @@ struct sta_info {
293 __le16 reason; 294 __le16 reason;
294 u8 plink_retries; 295 u8 plink_retries;
295 bool ignore_plink_timer; 296 bool ignore_plink_timer;
297 bool plink_timer_was_running;
296 enum plink_state plink_state; 298 enum plink_state plink_state;
297 u32 plink_timeout; 299 u32 plink_timeout;
298 struct timer_list plink_timer; 300 struct timer_list plink_timer;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8f68bf9746d0..a910148b8228 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -872,6 +872,8 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
872 872
873 do { 873 do {
874 hdr = (void *) skb->data; 874 hdr = (void *) skb->data;
875 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
876 break; /* must not overwrite AID */
875 next_len = skb->next ? skb->next->len : 0; 877 next_len = skb->next ? skb->next->len : 0;
876 group_addr = is_multicast_ether_addr(hdr->addr1); 878 group_addr = is_multicast_ether_addr(hdr->addr1);
877 879
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0689a8fbd1e6..949d857debd8 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -35,15 +35,6 @@
35/* privid for wiphys to determine whether they belong to us or not */ 35/* privid for wiphys to determine whether they belong to us or not */
36void *mac80211_wiphy_privid = &mac80211_wiphy_privid; 36void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
37 37
38/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
39/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
40const unsigned char rfc1042_header[] __aligned(2) =
41 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
42
43/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
44const unsigned char bridge_tunnel_header[] __aligned(2) =
45 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
46
47struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) 38struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
48{ 39{
49 struct ieee80211_local *local; 40 struct ieee80211_local *local;
@@ -103,70 +94,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
103 return NULL; 94 return NULL;
104} 95}
105 96
106unsigned int ieee80211_hdrlen(__le16 fc)
107{
108 unsigned int hdrlen = 24;
109
110 if (ieee80211_is_data(fc)) {
111 if (ieee80211_has_a4(fc))
112 hdrlen = 30;
113 if (ieee80211_is_data_qos(fc))
114 hdrlen += IEEE80211_QOS_CTL_LEN;
115 goto out;
116 }
117
118 if (ieee80211_is_ctl(fc)) {
119 /*
120 * ACK and CTS are 10 bytes, all others 16. To see how
121 * to get this condition consider
122 * subtype mask: 0b0000000011110000 (0x00F0)
123 * ACK subtype: 0b0000000011010000 (0x00D0)
124 * CTS subtype: 0b0000000011000000 (0x00C0)
125 * bits that matter: ^^^ (0x00E0)
126 * value of those: 0b0000000011000000 (0x00C0)
127 */
128 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
129 hdrlen = 10;
130 else
131 hdrlen = 16;
132 }
133out:
134 return hdrlen;
135}
136EXPORT_SYMBOL(ieee80211_hdrlen);
137
138unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
139{
140 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data;
141 unsigned int hdrlen;
142
143 if (unlikely(skb->len < 10))
144 return 0;
145 hdrlen = ieee80211_hdrlen(hdr->frame_control);
146 if (unlikely(hdrlen > skb->len))
147 return 0;
148 return hdrlen;
149}
150EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
151
152int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
153{
154 int ae = meshhdr->flags & IEEE80211S_FLAGS_AE;
155 /* 7.1.3.5a.2 */
156 switch (ae) {
157 case 0:
158 return 6;
159 case 1:
160 return 12;
161 case 2:
162 return 18;
163 case 3:
164 return 24;
165 default:
166 return 6;
167 }
168}
169
170void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 97void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
171{ 98{
172 struct sk_buff *skb = tx->skb; 99 struct sk_buff *skb = tx->skb;
@@ -1034,6 +961,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1034 struct sta_info *sta; 961 struct sta_info *sta;
1035 unsigned long flags; 962 unsigned long flags;
1036 int res; 963 int res;
964 bool from_suspend = local->suspended;
965
966 /*
967 * We're going to start the hardware, at that point
968 * we are no longer suspended and can RX frames.
969 */
970 local->suspended = false;
1037 971
1038 /* restart hardware */ 972 /* restart hardware */
1039 if (local->open_count) { 973 if (local->open_count) {
@@ -1058,6 +992,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1058 if (local->ops->sta_notify) { 992 if (local->ops->sta_notify) {
1059 spin_lock_irqsave(&local->sta_lock, flags); 993 spin_lock_irqsave(&local->sta_lock, flags);
1060 list_for_each_entry(sta, &local->sta_list, list) { 994 list_for_each_entry(sta, &local->sta_list, list) {
995 sdata = sta->sdata;
1061 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 996 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1062 sdata = container_of(sdata->bss, 997 sdata = container_of(sdata->bss,
1063 struct ieee80211_sub_if_data, 998 struct ieee80211_sub_if_data,
@@ -1128,5 +1063,40 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1128 ieee80211_wake_queues_by_reason(hw, 1063 ieee80211_wake_queues_by_reason(hw,
1129 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1064 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1130 1065
1066 /*
1067 * If this is for hw restart things are still running.
1068 * We may want to change that later, however.
1069 */
1070 if (!from_suspend)
1071 return 0;
1072
1073#ifdef CONFIG_PM
1074 local->suspended = false;
1075
1076 list_for_each_entry(sdata, &local->interfaces, list) {
1077 switch(sdata->vif.type) {
1078 case NL80211_IFTYPE_STATION:
1079 ieee80211_sta_restart(sdata);
1080 break;
1081 case NL80211_IFTYPE_ADHOC:
1082 ieee80211_ibss_restart(sdata);
1083 break;
1084 case NL80211_IFTYPE_MESH_POINT:
1085 ieee80211_mesh_restart(sdata);
1086 break;
1087 default:
1088 break;
1089 }
1090 }
1091
1092 add_timer(&local->sta_cleanup);
1093
1094 spin_lock_irqsave(&local->sta_lock, flags);
1095 list_for_each_entry(sta, &local->sta_list, list)
1096 mesh_plink_restart(sta);
1097 spin_unlock_irqrestore(&local->sta_lock, flags);
1098#else
1099 WARN_ON(1);
1100#endif
1131 return 0; 1101 return 0;
1132} 1102}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index c14394744a9c..a01154e127f0 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -37,12 +37,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
37 37
38 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 38 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
39 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); 39 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
40 if (ret) 40 if (ret && ret != -EALREADY)
41 return ret; 41 return ret;
42 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 42 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
43 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; 43 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
44 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT; 44 sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
45 ieee80211_sta_req_auth(sdata); 45 if (ret != -EALREADY)
46 ieee80211_sta_req_auth(sdata);
46 return 0; 47 return 0;
47 } 48 }
48 49
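Read together with the mlme.c hunk earlier, ieee80211_sta_set_extra_ie() now returns -EALREADY when the requested IEs match what is already configured, and the SIWGENIE handler treats that as success while skipping ieee80211_sta_req_auth(), so setting the same IEs twice no longer forces a re-association. A generic sketch of that idempotent-setter convention (types and names invented for the example):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct blob { u8 *buf; size_t len; };

/* Sketch: a setter that distinguishes "no change" from success and failure. */
static int blob_set(struct blob *b, const u8 *buf, size_t len)
{
        if (len == b->len && (!len || !memcmp(b->buf, buf, len)))
                return -EALREADY;       /* unchanged: caller may skip side effects */
        /* ... free the old buffer and copy in the new one ... */
        return 0;
}

static int blob_ioctl(struct blob *b, const u8 *buf, size_t len)
{
        int ret = blob_set(b, buf, len);

        if (ret && ret != -EALREADY)
                return ret;             /* a real failure */
        if (ret == 0) {
                /* something actually changed: trigger the expensive work here */
        }
        return 0;
}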
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 45b74f38b867..694343b9102b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -23,34 +23,6 @@
23 */ 23 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 25
26static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
27
28/* Given a data frame determine the 802.1p/1d tag to use. */
29static unsigned int classify_1d(struct sk_buff *skb)
30{
31 unsigned int dscp;
32
33 /* skb->priority values from 256->263 are magic values to
34 * directly indicate a specific 802.1d priority. This is used
35 * to allow 802.1d priority to be passed directly in from VLAN
36 * tags, etc.
37 */
38 if (skb->priority >= 256 && skb->priority <= 263)
39 return skb->priority - 256;
40
41 switch (skb->protocol) {
42 case htons(ETH_P_IP):
43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break;
45
46 default:
47 return 0;
48 }
49
50 return dscp >> 5;
51}
52
53
54static int wme_downgrade_ac(struct sk_buff *skb) 26static int wme_downgrade_ac(struct sk_buff *skb)
55{ 27{
56 switch (skb->priority) { 28 switch (skb->priority) {
@@ -94,7 +66,7 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
94 66
95 /* use the data classifier to determine what 802.1d tag the 67 /* use the data classifier to determine what 802.1d tag the
96 * data frame has */ 68 * data frame has */
97 skb->priority = classify_1d(skb); 69 skb->priority = cfg80211_classify8021d(skb);
98 70
99 /* in case we are a client verify acm is not set for this ac */ 71 /* in case we are a client verify acm is not set for this ac */
100 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 72 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
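classify_1d() goes away in favour of cfg80211_classify8021d(); the removed code documents what the shared classifier is expected to provide: skb->priority values 256..263 pass through as explicit 802.1d tags, and IPv4 traffic is tagged from its DSCP bits (tos & 0xfc, shifted down by 5). The core mapping, as a tiny standalone helper:

#include <linux/types.h>

/* Sketch: map an IPv4 TOS byte to an 802.1d priority (0..7), as the
 * removed classify_1d() did for ETH_P_IP frames. */
static unsigned int tos_to_1d(u8 tos)
{
        unsigned int dscp = tos & 0xfc; /* keep the six DSCP bits */

        return dscp >> 5;               /* 0..7 */
}

For example, EF-marked traffic (TOS 0xb8) comes out as priority 5, and ieee802_1d_to_ac[5] in the table above is 1.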
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 0f1218b8d289..67e38a056240 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
343 /* not yet present - create a candidate for a new connection 343 /* not yet present - create a candidate for a new connection
344 * and then redo the check */ 344 * and then redo the check */
345 conn = rxrpc_alloc_connection(gfp); 345 conn = rxrpc_alloc_connection(gfp);
346 if (IS_ERR(conn)) { 346 if (!conn) {
347 _leave(" = %ld", PTR_ERR(conn)); 347 _leave(" = -ENOMEM");
348 return PTR_ERR(conn); 348 return -ENOMEM;
349 } 349 }
350 350
351 conn->trans = trans; 351 conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
508 /* not yet present - create a candidate for a new connection and then 508 /* not yet present - create a candidate for a new connection and then
509 * redo the check */ 509 * redo the check */
510 candidate = rxrpc_alloc_connection(gfp); 510 candidate = rxrpc_alloc_connection(gfp);
511 if (IS_ERR(candidate)) { 511 if (!candidate) {
512 _leave(" = %ld", PTR_ERR(candidate)); 512 _leave(" = -ENOMEM");
513 return PTR_ERR(candidate); 513 return -ENOMEM;
514 } 514 }
515 515
516 candidate->trans = trans; 516 candidate->trans = trans;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 428a5ef5b944..a886496bdc3a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -308,6 +308,7 @@ restart:
308 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
309 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
310 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == 0) {
311 txq_trans_update(slave_txq);
311 __netif_tx_unlock(slave_txq); 312 __netif_tx_unlock(slave_txq);
312 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
313 netif_wake_queue(dev); 314 netif_wake_queue(dev);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 3c3bc9e579ed..45005497c634 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -10,6 +10,14 @@ config CFG80211_REG_DEBUG
10 10
11 If unsure, say N. 11 If unsure, say N.
12 12
13config CFG80211_DEBUGFS
14 bool "cfg80211 DebugFS entries"
15 depends on CFG80211 && DEBUG_FS
16 ---help---
 17 You can enable this if you want debugfs entries for cfg80211.
18
19 If unsure, say N.
20
13config WIRELESS_OLD_REGULATORY 21config WIRELESS_OLD_REGULATORY
14 bool "Old wireless static regulatory definitions" 22 bool "Old wireless static regulatory definitions"
15 default n 23 default n
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 14ea01c4a103..f78c4832a9ca 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o 6obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
7 7
8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o 8cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o
9cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
9cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o 10cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
10 11
11ccflags-y += -D__CHECK_ENDIAN__ 12ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 47c20eb0c04d..a5dbea1da476 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -17,6 +17,7 @@
17#include "nl80211.h" 17#include "nl80211.h"
18#include "core.h" 18#include "core.h"
19#include "sysfs.h" 19#include "sysfs.h"
20#include "debugfs.h"
20 21
21/* name for sysfs, %d is appended */ 22/* name for sysfs, %d is appended */
22#define PHY_NAME "phy" 23#define PHY_NAME "phy"
@@ -228,7 +229,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
228 229
229/* exported functions */ 230/* exported functions */
230 231
231struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) 232struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
232{ 233{
233 static int wiphy_counter; 234 static int wiphy_counter;
234 235
@@ -375,6 +376,8 @@ int wiphy_register(struct wiphy *wiphy)
375 nl80211_send_reg_change_event(&request); 376 nl80211_send_reg_change_event(&request);
376 } 377 }
377 378
379 cfg80211_debugfs_drv_add(drv);
380
378 res = 0; 381 res = 0;
379out_unlock: 382out_unlock:
380 mutex_unlock(&cfg80211_mutex); 383 mutex_unlock(&cfg80211_mutex);
@@ -405,6 +408,8 @@ void wiphy_unregister(struct wiphy *wiphy)
405 /* unlock again before freeing */ 408 /* unlock again before freeing */
406 mutex_unlock(&drv->mtx); 409 mutex_unlock(&drv->mtx);
407 410
411 cfg80211_debugfs_drv_del(drv);
412
408 /* If this device got a regulatory hint tell core its 413 /* If this device got a regulatory hint tell core its
409 * free to listen now to a new shiny device regulatory hint */ 414 * free to listen now to a new shiny device regulatory hint */
410 reg_device_remove(wiphy); 415 reg_device_remove(wiphy);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index f14b6c5f4221..ab512bcd8153 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -10,12 +10,13 @@
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h> 11#include <linux/kref.h>
12#include <linux/rbtree.h> 12#include <linux/rbtree.h>
13#include <linux/debugfs.h>
13#include <net/genetlink.h> 14#include <net/genetlink.h>
14#include <net/cfg80211.h> 15#include <net/cfg80211.h>
15#include "reg.h" 16#include "reg.h"
16 17
17struct cfg80211_registered_device { 18struct cfg80211_registered_device {
18 struct cfg80211_ops *ops; 19 const struct cfg80211_ops *ops;
19 struct list_head list; 20 struct list_head list;
20 /* we hold this mutex during any call so that 21 /* we hold this mutex during any call so that
21 * we cannot do multiple calls at once, and also 22 * we cannot do multiple calls at once, and also
@@ -50,6 +51,17 @@ struct cfg80211_registered_device {
50 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 51 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
51 unsigned long suspend_at; 52 unsigned long suspend_at;
52 53
54#ifdef CONFIG_CFG80211_DEBUGFS
55 /* Debugfs entries */
56 struct wiphy_debugfsdentries {
57 struct dentry *rts_threshold;
58 struct dentry *fragmentation_threshold;
59 struct dentry *short_retry_limit;
60 struct dentry *long_retry_limit;
61 struct dentry *ht40allow_map;
62 } debugfs;
63#endif
64
53 /* must be last because of the way we do wiphy_priv(), 65 /* must be last because of the way we do wiphy_priv(),
54 * and it should at least be aligned to NETDEV_ALIGN */ 66 * and it should at least be aligned to NETDEV_ALIGN */
55 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 67 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
new file mode 100644
index 000000000000..679ddfcec1ee
--- /dev/null
+++ b/net/wireless/debugfs.c
@@ -0,0 +1,131 @@
1/*
2 * cfg80211 debugfs
3 *
4 * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include "core.h"
13#include "debugfs.h"
14
15static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
16{
17 file->private_data = inode->i_private;
18 return 0;
19}
20
21#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
22static ssize_t name## _read(struct file *file, char __user *userbuf, \
23 size_t count, loff_t *ppos) \
24{ \
25 struct wiphy *wiphy= file->private_data; \
26 char buf[buflen]; \
27 int res; \
28 \
29 res = scnprintf(buf, buflen, fmt "\n", ##value); \
30 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
31} \
32 \
33static const struct file_operations name## _ops = { \
34 .read = name## _read, \
35 .open = cfg80211_open_file_generic, \
36};
37
38DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
39 wiphy->rts_threshold)
40DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
41 wiphy->frag_threshold);
42DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
43 wiphy->retry_short)
44DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
45 wiphy->retry_long);
46
47static int ht_print_chan(struct ieee80211_channel *chan,
48 char *buf, int buf_size, int offset)
49{
50 if (WARN_ON(offset > buf_size))
51 return 0;
52
53 if (chan->flags & IEEE80211_CHAN_DISABLED)
54 return snprintf(buf + offset,
55 buf_size - offset,
56 "%d Disabled\n",
57 chan->center_freq);
58
59 return snprintf(buf + offset,
60 buf_size - offset,
61 "%d HT40 %c%c\n",
62 chan->center_freq,
63 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
64 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
65}
66
67static ssize_t ht40allow_map_read(struct file *file,
68 char __user *user_buf,
69 size_t count, loff_t *ppos)
70{
71 struct wiphy *wiphy = file->private_data;
72 char *buf;
73 unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
74 enum ieee80211_band band;
75 struct ieee80211_supported_band *sband;
76
77 buf = kzalloc(buf_size, GFP_KERNEL);
78 if (!buf)
79 return -ENOMEM;
80
81 mutex_lock(&cfg80211_mutex);
82
83 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
84 sband = wiphy->bands[band];
85 if (!sband)
86 continue;
87 for (i = 0; i < sband->n_channels; i++)
88 offset += ht_print_chan(&sband->channels[i],
89 buf, buf_size, offset);
90 }
91
92 mutex_unlock(&cfg80211_mutex);
93
94 r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
95
96 kfree(buf);
97
98 return r;
99}
100
101static const struct file_operations ht40allow_map_ops = {
102 .read = ht40allow_map_read,
103 .open = cfg80211_open_file_generic,
104};
105
106#define DEBUGFS_ADD(name) \
107 drv->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
108 &drv->wiphy, &name## _ops);
109#define DEBUGFS_DEL(name) \
110 debugfs_remove(drv->debugfs.name); \
111 drv->debugfs.name = NULL;
112
113void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv)
114{
115 struct dentry *phyd = drv->wiphy.debugfsdir;
116
117 DEBUGFS_ADD(rts_threshold);
118 DEBUGFS_ADD(fragmentation_threshold);
119 DEBUGFS_ADD(short_retry_limit);
120 DEBUGFS_ADD(long_retry_limit);
121 DEBUGFS_ADD(ht40allow_map);
122}
123
124void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv)
125{
126 DEBUGFS_DEL(rts_threshold);
127 DEBUGFS_DEL(fragmentation_threshold);
128 DEBUGFS_DEL(short_retry_limit);
129 DEBUGFS_DEL(long_retry_limit);
130 DEBUGFS_DEL(ht40allow_map);
131}
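DEBUGFS_READONLY_FILE() above stamps out a read handler plus a file_operations per wiphy field; for reference, the rts_threshold instance expands to roughly the following (modulo whitespace):

static ssize_t rts_threshold_read(struct file *file, char __user *userbuf,
                                  size_t count, loff_t *ppos)
{
        struct wiphy *wiphy = file->private_data;
        char buf[20];
        int res;

        res = scnprintf(buf, 20, "%d\n", wiphy->rts_threshold);
        return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

static const struct file_operations rts_threshold_ops = {
        .read = rts_threshold_read,
        .open = cfg80211_open_file_generic,
};

DEBUGFS_ADD(rts_threshold) then creates the S_IRUGO file under the wiphy's debugfs directory, with &drv->wiphy as the private data that the read handler picks up via cfg80211_open_file_generic().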
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
new file mode 100644
index 000000000000..c226983ae66b
--- /dev/null
+++ b/net/wireless/debugfs.h
@@ -0,0 +1,14 @@
1#ifndef __CFG80211_DEBUGFS_H
2#define __CFG80211_DEBUGFS_H
3
4#ifdef CONFIG_CFG80211_DEBUGFS
5void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv);
6void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv);
7#else
8static inline
9void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) {}
10static inline
11void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) {}
12#endif
13
14#endif /* __CFG80211_DEBUGFS_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a3a152f55dd0..4b4d3c8a1aed 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -57,7 +57,7 @@ static int get_drv_dev_by_info_ifindex(struct nlattr **attrs,
57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 57static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 58 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 59 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
60 .len = BUS_ID_SIZE-1 }, 60 .len = 20-1 },
61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, 61 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED },
62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, 62 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, 63 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
@@ -77,6 +77,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
77 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, 77 [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
78 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, 78 [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
79 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, 79 [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
80 [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
80 81
81 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, 82 [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
82 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, 83 [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
@@ -492,7 +493,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
492 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 493 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
493 struct ieee80211_channel *chan; 494 struct ieee80211_channel *chan;
494 struct ieee80211_sta_ht_cap *ht_cap; 495 struct ieee80211_sta_ht_cap *ht_cap;
495 u32 freq, sec_freq; 496 u32 freq;
496 497
497 if (!rdev->ops->set_channel) { 498 if (!rdev->ops->set_channel) {
498 result = -EOPNOTSUPP; 499 result = -EOPNOTSUPP;
@@ -518,33 +519,28 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
518 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 519 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
519 goto bad_res; 520 goto bad_res;
520 521
521 if (channel_type == NL80211_CHAN_HT40MINUS) 522 if (channel_type == NL80211_CHAN_HT40MINUS &&
522 sec_freq = freq - 20; 523 (chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
523 else if (channel_type == NL80211_CHAN_HT40PLUS) 524 goto bad_res;
524 sec_freq = freq + 20; 525 else if (channel_type == NL80211_CHAN_HT40PLUS &&
525 else 526 (chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
526 sec_freq = 0;
527
528 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
529
530 /* no HT capabilities */
531 if (channel_type != NL80211_CHAN_NO_HT &&
532 !ht_cap->ht_supported)
533 goto bad_res; 527 goto bad_res;
534 528
535 if (sec_freq) { 529 /*
 536 struct ieee80211_channel *schan; 530 * At this point we know that if HT40 was requested
531 * we are allowed to use it and the extension channel
532 * exists.
533 */
534
535 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
537 536
538 /* no 40 MHz capabilities */ 537 /* no HT capabilities or intolerant */
538 if (channel_type != NL80211_CHAN_NO_HT) {
539 if (!ht_cap->ht_supported)
540 goto bad_res;
539 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 541 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
540 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) 542 (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
541 goto bad_res; 543 goto bad_res;
542
543 schan = ieee80211_get_channel(&rdev->wiphy, sec_freq);
544
545 /* Secondary channel not allowed */
546 if (!schan || schan->flags & IEEE80211_CHAN_DISABLED)
547 goto bad_res;
548 } 544 }
549 545
550 result = rdev->ops->set_channel(&rdev->wiphy, chan, 546 result = rdev->ops->set_channel(&rdev->wiphy, chan,
@@ -2571,18 +2567,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2571 rem_reg_rules) { 2567 rem_reg_rules) {
2572 num_rules++; 2568 num_rules++;
2573 if (num_rules > NL80211_MAX_SUPP_REG_RULES) 2569 if (num_rules > NL80211_MAX_SUPP_REG_RULES)
2574 goto bad_reg; 2570 return -EINVAL;
2575 } 2571 }
2576 2572
2577 if (!reg_is_valid_request(alpha2)) 2573 mutex_lock(&cfg80211_mutex);
2578 return -EINVAL; 2574
2575 if (!reg_is_valid_request(alpha2)) {
2576 r = -EINVAL;
2577 goto bad_reg;
2578 }
2579 2579
2580 size_of_regd = sizeof(struct ieee80211_regdomain) + 2580 size_of_regd = sizeof(struct ieee80211_regdomain) +
2581 (num_rules * sizeof(struct ieee80211_reg_rule)); 2581 (num_rules * sizeof(struct ieee80211_reg_rule));
2582 2582
2583 rd = kzalloc(size_of_regd, GFP_KERNEL); 2583 rd = kzalloc(size_of_regd, GFP_KERNEL);
2584 if (!rd) 2584 if (!rd) {
2585 return -ENOMEM; 2585 r = -ENOMEM;
2586 goto bad_reg;
2587 }
2586 2588
2587 rd->n_reg_rules = num_rules; 2589 rd->n_reg_rules = num_rules;
2588 rd->alpha2[0] = alpha2[0]; 2590 rd->alpha2[0] = alpha2[0];
@@ -2599,20 +2601,24 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
2599 2601
2600 rule_idx++; 2602 rule_idx++;
2601 2603
2602 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) 2604 if (rule_idx > NL80211_MAX_SUPP_REG_RULES) {
2605 r = -EINVAL;
2603 goto bad_reg; 2606 goto bad_reg;
2607 }
2604 } 2608 }
2605 2609
2606 BUG_ON(rule_idx != num_rules); 2610 BUG_ON(rule_idx != num_rules);
2607 2611
2608 mutex_lock(&cfg80211_mutex);
2609 r = set_regdom(rd); 2612 r = set_regdom(rd);
2613
2610 mutex_unlock(&cfg80211_mutex); 2614 mutex_unlock(&cfg80211_mutex);
2615
2611 return r; 2616 return r;
2612 2617
2613 bad_reg: 2618 bad_reg:
2619 mutex_unlock(&cfg80211_mutex);
2614 kfree(rd); 2620 kfree(rd);
2615 return -EINVAL; 2621 return r;
2616} 2622}
2617 2623
2618static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) 2624static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 537af62ec42b..f87ac1df2df5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -48,12 +48,6 @@ static struct regulatory_request *last_request;
48/* To trigger userspace events */ 48/* To trigger userspace events */
49static struct platform_device *reg_pdev; 49static struct platform_device *reg_pdev;
50 50
51/* Keep the ordering from large to small */
52static u32 supported_bandwidths[] = {
53 MHZ_TO_KHZ(40),
54 MHZ_TO_KHZ(20),
55};
56
57/* 51/*
58 * Central wireless core regulatory domains, we only need two, 52 * Central wireless core regulatory domains, we only need two,
59 * the current one and a world regulatory domain in case we have no 53 * the current one and a world regulatory domain in case we have no
@@ -388,6 +382,8 @@ static int call_crda(const char *alpha2)
388/* Used by nl80211 before kmalloc'ing our regulatory domain */ 382/* Used by nl80211 before kmalloc'ing our regulatory domain */
389bool reg_is_valid_request(const char *alpha2) 383bool reg_is_valid_request(const char *alpha2)
390{ 384{
385 assert_cfg80211_lock();
386
391 if (!last_request) 387 if (!last_request)
392 return false; 388 return false;
393 389
@@ -435,19 +431,20 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
435 return true; 431 return true;
436} 432}
437 433
438/* Returns value in KHz */ 434static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
439static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range, 435 u32 center_freq_khz,
440 u32 freq) 436 u32 bw_khz)
441{ 437{
442 unsigned int i; 438 u32 start_freq_khz, end_freq_khz;
443 for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) { 439
444 u32 start_freq_khz = freq - supported_bandwidths[i]/2; 440 start_freq_khz = center_freq_khz - (bw_khz/2);
445 u32 end_freq_khz = freq + supported_bandwidths[i]/2; 441 end_freq_khz = center_freq_khz + (bw_khz/2);
446 if (start_freq_khz >= freq_range->start_freq_khz && 442
447 end_freq_khz <= freq_range->end_freq_khz) 443 if (start_freq_khz >= freq_range->start_freq_khz &&
448 return supported_bandwidths[i]; 444 end_freq_khz <= freq_range->end_freq_khz)
449 } 445 return true;
450 return 0; 446
447 return false;
451} 448}
452 449
453/** 450/**
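
The new reg_does_bw_fit() drops the old fixed table of supported bandwidths: instead of asking "what is the widest bandwidth this rule allows at this frequency", callers now ask whether a channel of a given width, centred at a given frequency, lies entirely inside the rule's range. A self-contained rendering of that check (the struct and constants are simplified, not the cfg80211 definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MHZ_TO_KHZ(f)   ((f) * 1000)

struct freq_range {
        uint32_t start_freq_khz;
        uint32_t end_freq_khz;
};

/*
 * Same shape as the new reg_does_bw_fit(): does a channel bw_khz wide,
 * centred at center_freq_khz, lie entirely inside the rule's range?
 */
static bool bw_fits(const struct freq_range *r,
                    uint32_t center_freq_khz, uint32_t bw_khz)
{
        uint32_t start = center_freq_khz - bw_khz / 2;
        uint32_t end = center_freq_khz + bw_khz / 2;

        return start >= r->start_freq_khz && end <= r->end_freq_khz;
}

int main(void)
{
        struct freq_range rule = { MHZ_TO_KHZ(2402), MHZ_TO_KHZ(2482) };

        /* Channel 1 (2412 MHz): a 20 MHz channel fits, a 40 MHz one does not. */
        printf("20 MHz fits: %d\n", bw_fits(&rule, MHZ_TO_KHZ(2412), MHZ_TO_KHZ(20)));
        printf("40 MHz fits: %d\n", bw_fits(&rule, MHZ_TO_KHZ(2412), MHZ_TO_KHZ(40)));
        return 0;
}
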
@@ -847,14 +844,17 @@ static u32 map_regdom_flags(u32 rd_flags)
847 844
848static int freq_reg_info_regd(struct wiphy *wiphy, 845static int freq_reg_info_regd(struct wiphy *wiphy,
849 u32 center_freq, 846 u32 center_freq,
850 u32 *bandwidth, 847 u32 desired_bw_khz,
851 const struct ieee80211_reg_rule **reg_rule, 848 const struct ieee80211_reg_rule **reg_rule,
852 const struct ieee80211_regdomain *custom_regd) 849 const struct ieee80211_regdomain *custom_regd)
853{ 850{
854 int i; 851 int i;
855 bool band_rule_found = false; 852 bool band_rule_found = false;
856 const struct ieee80211_regdomain *regd; 853 const struct ieee80211_regdomain *regd;
857 u32 max_bandwidth = 0; 854 bool bw_fits = false;
855
856 if (!desired_bw_khz)
857 desired_bw_khz = MHZ_TO_KHZ(20);
858 858
859 regd = custom_regd ? custom_regd : cfg80211_regdomain; 859 regd = custom_regd ? custom_regd : cfg80211_regdomain;
860 860
@@ -887,38 +887,54 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
887 if (!band_rule_found) 887 if (!band_rule_found)
888 band_rule_found = freq_in_rule_band(fr, center_freq); 888 band_rule_found = freq_in_rule_band(fr, center_freq);
889 889
890 max_bandwidth = freq_max_bandwidth(fr, center_freq); 890 bw_fits = reg_does_bw_fit(fr,
891 center_freq,
892 desired_bw_khz);
891 893
892 if (max_bandwidth && *bandwidth <= max_bandwidth) { 894 if (band_rule_found && bw_fits) {
893 *reg_rule = rr; 895 *reg_rule = rr;
894 *bandwidth = max_bandwidth; 896 return 0;
895 break;
896 } 897 }
897 } 898 }
898 899
899 if (!band_rule_found) 900 if (!band_rule_found)
900 return -ERANGE; 901 return -ERANGE;
901 902
902 return !max_bandwidth; 903 return -EINVAL;
903} 904}
904EXPORT_SYMBOL(freq_reg_info); 905EXPORT_SYMBOL(freq_reg_info);
905 906
906int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, 907int freq_reg_info(struct wiphy *wiphy,
907 const struct ieee80211_reg_rule **reg_rule) 908 u32 center_freq,
909 u32 desired_bw_khz,
910 const struct ieee80211_reg_rule **reg_rule)
908{ 911{
909 assert_cfg80211_lock(); 912 assert_cfg80211_lock();
910 return freq_reg_info_regd(wiphy, center_freq, 913 return freq_reg_info_regd(wiphy,
911 bandwidth, reg_rule, NULL); 914 center_freq,
915 desired_bw_khz,
916 reg_rule,
917 NULL);
912} 918}
913 919
920/*
921 * Note that right now we assume the desired channel bandwidth
922 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
923 * per channel, the primary and the extension channel). To support
924 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 925 * new ieee80211_channel.target_bw and re-run the regulatory check
 926 * on the wiphy with the target_bw specified. Then we can simply use
 927 * that value for desired_bw_khz below.
928 */
914static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 929static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
915 unsigned int chan_idx) 930 unsigned int chan_idx)
916{ 931{
917 int r; 932 int r;
918 u32 flags; 933 u32 flags, bw_flags = 0;
919 u32 max_bandwidth = 0; 934 u32 desired_bw_khz = MHZ_TO_KHZ(20);
920 const struct ieee80211_reg_rule *reg_rule = NULL; 935 const struct ieee80211_reg_rule *reg_rule = NULL;
921 const struct ieee80211_power_rule *power_rule = NULL; 936 const struct ieee80211_power_rule *power_rule = NULL;
937 const struct ieee80211_freq_range *freq_range = NULL;
922 struct ieee80211_supported_band *sband; 938 struct ieee80211_supported_band *sband;
923 struct ieee80211_channel *chan; 939 struct ieee80211_channel *chan;
924 struct wiphy *request_wiphy = NULL; 940 struct wiphy *request_wiphy = NULL;
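
freq_reg_info() and freq_reg_info_regd() now take the desired bandwidth instead of returning the maximum one: 0 is treated as 20 MHz, a rule is returned only when both the band and the requested width fit, and the caller can tell "frequency not covered by any rule" (-ERANGE) apart from "covered, but not at that width" (-EINVAL). A simplified, self-contained lookup with the same return-value contract; the rule layout and the in-band test here are illustrative (the kernel's freq_in_rule_band() applies its own tolerance):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rule { uint32_t start_khz, end_khz; };

static int lookup_rule(const struct rule *rules, size_t n_rules,
                       uint32_t center_khz, uint32_t desired_bw_khz,
                       const struct rule **out)
{
        bool band_found = false;
        size_t i;

        if (!desired_bw_khz)
                desired_bw_khz = 20000;         /* default to 20 MHz, as the patch does */

        for (i = 0; i < n_rules; i++) {
                const struct rule *r = &rules[i];

                if (center_khz >= r->start_khz && center_khz <= r->end_khz)
                        band_found = true;

                /* Equivalent of reg_does_bw_fit(): whole channel inside the rule. */
                if (center_khz - desired_bw_khz / 2 >= r->start_khz &&
                    center_khz + desired_bw_khz / 2 <= r->end_khz) {
                        *out = r;
                        return 0;
                }
        }

        return band_found ? -EINVAL : -ERANGE;
}

int main(void)
{
        const struct rule rules[] = { { 2402000, 2482000 } };
        const struct rule *match;

        /* 2412 MHz at 20 MHz wide fits the 2402-2482 MHz rule: returns 0. */
        return lookup_rule(rules, 1, 2412000, 20000, &match);
}
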
@@ -933,8 +949,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
933 949
934 flags = chan->orig_flags; 950 flags = chan->orig_flags;
935 951
936 r = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq), 952 r = freq_reg_info(wiphy,
937 &max_bandwidth, &reg_rule); 953 MHZ_TO_KHZ(chan->center_freq),
954 desired_bw_khz,
955 &reg_rule);
938 956
939 if (r) { 957 if (r) {
940 /* 958 /*
@@ -977,6 +995,10 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
977 } 995 }
978 996
979 power_rule = &reg_rule->power_rule; 997 power_rule = &reg_rule->power_rule;
998 freq_range = &reg_rule->freq_range;
999
1000 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1001 bw_flags = IEEE80211_CHAN_NO_HT40;
980 1002
981 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1003 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
982 request_wiphy && request_wiphy == wiphy && 1004 request_wiphy && request_wiphy == wiphy &&
@@ -987,19 +1009,19 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
987 * settings 1009 * settings
988 */ 1010 */
989 chan->flags = chan->orig_flags = 1011 chan->flags = chan->orig_flags =
990 map_regdom_flags(reg_rule->flags); 1012 map_regdom_flags(reg_rule->flags) | bw_flags;
991 chan->max_antenna_gain = chan->orig_mag = 1013 chan->max_antenna_gain = chan->orig_mag =
992 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1014 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
993 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1015 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
994 chan->max_power = chan->orig_mpwr = 1016 chan->max_power = chan->orig_mpwr =
995 (int) MBM_TO_DBM(power_rule->max_eirp); 1017 (int) MBM_TO_DBM(power_rule->max_eirp);
996 return; 1018 return;
997 } 1019 }
998 1020
999 chan->flags = flags | map_regdom_flags(reg_rule->flags); 1021 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
1000 chan->max_antenna_gain = min(chan->orig_mag, 1022 chan->max_antenna_gain = min(chan->orig_mag,
1001 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 1023 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
1002 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1024 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1003 if (chan->orig_mpwr) 1025 if (chan->orig_mpwr)
1004 chan->max_power = min(chan->orig_mpwr, 1026 chan->max_power = min(chan->orig_mpwr,
1005 (int) MBM_TO_DBM(power_rule->max_eirp)); 1027 (int) MBM_TO_DBM(power_rule->max_eirp));
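
handle_channel() now derives a per-channel HT40 restriction straight from the matched rule: if the rule's max_bandwidth_khz is below 40 MHz, IEEE80211_CHAN_NO_HT40 is OR-ed into the channel flags alongside the mapped regulatory flags. A small sketch of that flag derivation (the flag value and struct are placeholders, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define MHZ_TO_KHZ(f)   ((f) * 1000)
#define CHAN_NO_HT40    0x10            /* placeholder bit, not IEEE80211_CHAN_NO_HT40 */

struct rule {
        uint32_t max_bandwidth_khz;
        uint32_t reg_flags;
};

static uint32_t channel_flags(uint32_t orig_flags, const struct rule *r)
{
        uint32_t bw_flags = 0;

        /* Rules narrower than 40 MHz cannot host an HT40 channel. */
        if (r->max_bandwidth_khz < MHZ_TO_KHZ(40))
                bw_flags = CHAN_NO_HT40;

        return orig_flags | bw_flags | r->reg_flags;
}

int main(void)
{
        struct rule narrow = { MHZ_TO_KHZ(20), 0 };

        printf("flags: %#x\n", channel_flags(0, &narrow));      /* prints 0x10 */
        return 0;
}
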
@@ -1156,6 +1178,93 @@ static void reg_process_beacons(struct wiphy *wiphy)
1156 wiphy_update_beacon_reg(wiphy); 1178 wiphy_update_beacon_reg(wiphy);
1157} 1179}
1158 1180
1181static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
1182{
1183 if (!chan)
1184 return true;
1185 if (chan->flags & IEEE80211_CHAN_DISABLED)
1186 return true;
1187 /* This would happen when regulatory rules disallow HT40 completely */
1188 if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40)))
1189 return true;
1190 return false;
1191}
1192
1193static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1194 enum ieee80211_band band,
1195 unsigned int chan_idx)
1196{
1197 struct ieee80211_supported_band *sband;
1198 struct ieee80211_channel *channel;
1199 struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
1200 unsigned int i;
1201
1202 assert_cfg80211_lock();
1203
1204 sband = wiphy->bands[band];
1205 BUG_ON(chan_idx >= sband->n_channels);
1206 channel = &sband->channels[chan_idx];
1207
1208 if (is_ht40_not_allowed(channel)) {
1209 channel->flags |= IEEE80211_CHAN_NO_HT40;
1210 return;
1211 }
1212
1213 /*
1214 * We need to ensure the extension channels exist to
 1215 * be able to use HT40- or HT40+; this finds them (or not)
1216 */
1217 for (i = 0; i < sband->n_channels; i++) {
1218 struct ieee80211_channel *c = &sband->channels[i];
1219 if (c->center_freq == (channel->center_freq - 20))
1220 channel_before = c;
1221 if (c->center_freq == (channel->center_freq + 20))
1222 channel_after = c;
1223 }
1224
1225 /*
 1226 * Please note that this assumes the target bandwidth is 20 MHz;
 1227 * if that ever changes we also need to update the logic below
 1228 * accordingly.
1229 */
1230 if (is_ht40_not_allowed(channel_before))
1231 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
1232 else
1233 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
1234
1235 if (is_ht40_not_allowed(channel_after))
1236 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
1237 else
1238 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
1239}
1240
1241static void reg_process_ht_flags_band(struct wiphy *wiphy,
1242 enum ieee80211_band band)
1243{
1244 unsigned int i;
1245 struct ieee80211_supported_band *sband;
1246
1247 BUG_ON(!wiphy->bands[band]);
1248 sband = wiphy->bands[band];
1249
1250 for (i = 0; i < sband->n_channels; i++)
1251 reg_process_ht_flags_channel(wiphy, band, i);
1252}
1253
1254static void reg_process_ht_flags(struct wiphy *wiphy)
1255{
1256 enum ieee80211_band band;
1257
1258 if (!wiphy)
1259 return;
1260
1261 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1262 if (wiphy->bands[band])
1263 reg_process_ht_flags_band(wiphy, band);
1264 }
1265
1266}
1267
1159void wiphy_update_regulatory(struct wiphy *wiphy, 1268void wiphy_update_regulatory(struct wiphy *wiphy,
1160 enum nl80211_reg_initiator initiator) 1269 enum nl80211_reg_initiator initiator)
1161{ 1270{
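
The added HT40 pass looks 20 MHz to either side of every channel: if the would-be extension channel is absent, disabled, or itself flagged NO_HT40, the primary channel gets IEEE80211_CHAN_NO_HT40MINUS or IEEE80211_CHAN_NO_HT40PLUS; otherwise those flags are cleared again. A compact userspace model of that neighbour scan over an array of channels (flag values and the channel struct are placeholders, not the cfg80211 ones):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CHAN_DISABLED           0x01    /* placeholder flag values */
#define CHAN_NO_HT40            0x02
#define CHAN_NO_HT40MINUS       0x04
#define CHAN_NO_HT40PLUS        0x08

struct chan {
        uint16_t center_freq;   /* MHz */
        uint32_t flags;
};

static bool ht40_not_allowed(const struct chan *c)
{
        return !c || (c->flags & (CHAN_DISABLED | CHAN_NO_HT40));
}

static void process_ht_flags(struct chan *chans, size_t n)
{
        size_t i, j;

        for (i = 0; i < n; i++) {
                struct chan *c = &chans[i], *below = NULL, *above = NULL;

                if (ht40_not_allowed(c)) {
                        c->flags |= CHAN_NO_HT40;
                        continue;
                }

                /* Find the 20 MHz neighbours that would act as extension channels. */
                for (j = 0; j < n; j++) {
                        if (chans[j].center_freq == c->center_freq - 20)
                                below = &chans[j];
                        if (chans[j].center_freq == c->center_freq + 20)
                                above = &chans[j];
                }

                if (ht40_not_allowed(below))
                        c->flags |= CHAN_NO_HT40MINUS;
                else
                        c->flags &= ~CHAN_NO_HT40MINUS;

                if (ht40_not_allowed(above))
                        c->flags |= CHAN_NO_HT40PLUS;
                else
                        c->flags &= ~CHAN_NO_HT40PLUS;
        }
}

int main(void)
{
        /* Channels 36/40/44 (5180/5200/5220 MHz); 36 has no lower neighbour. */
        struct chan chans[] = { { 5180, 0 }, { 5200, 0 }, { 5220, 0 } };

        process_ht_flags(chans, 3);
        return !(chans[0].flags & CHAN_NO_HT40MINUS);
}
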
@@ -1169,6 +1278,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
1169 } 1278 }
1170out: 1279out:
1171 reg_process_beacons(wiphy); 1280 reg_process_beacons(wiphy);
1281 reg_process_ht_flags(wiphy);
1172 if (wiphy->reg_notifier) 1282 if (wiphy->reg_notifier)
1173 wiphy->reg_notifier(wiphy, last_request); 1283 wiphy->reg_notifier(wiphy, last_request);
1174} 1284}
@@ -1179,9 +1289,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1179 const struct ieee80211_regdomain *regd) 1289 const struct ieee80211_regdomain *regd)
1180{ 1290{
1181 int r; 1291 int r;
1182 u32 max_bandwidth = 0; 1292 u32 desired_bw_khz = MHZ_TO_KHZ(20);
1293 u32 bw_flags = 0;
1183 const struct ieee80211_reg_rule *reg_rule = NULL; 1294 const struct ieee80211_reg_rule *reg_rule = NULL;
1184 const struct ieee80211_power_rule *power_rule = NULL; 1295 const struct ieee80211_power_rule *power_rule = NULL;
1296 const struct ieee80211_freq_range *freq_range = NULL;
1185 struct ieee80211_supported_band *sband; 1297 struct ieee80211_supported_band *sband;
1186 struct ieee80211_channel *chan; 1298 struct ieee80211_channel *chan;
1187 1299
@@ -1191,8 +1303,11 @@ static void handle_channel_custom(struct wiphy *wiphy,
1191 BUG_ON(chan_idx >= sband->n_channels); 1303 BUG_ON(chan_idx >= sband->n_channels);
1192 chan = &sband->channels[chan_idx]; 1304 chan = &sband->channels[chan_idx];
1193 1305
1194 r = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq), 1306 r = freq_reg_info_regd(wiphy,
1195 &max_bandwidth, &reg_rule, regd); 1307 MHZ_TO_KHZ(chan->center_freq),
1308 desired_bw_khz,
1309 &reg_rule,
1310 regd);
1196 1311
1197 if (r) { 1312 if (r) {
1198 chan->flags = IEEE80211_CHAN_DISABLED; 1313 chan->flags = IEEE80211_CHAN_DISABLED;
@@ -1200,10 +1315,14 @@ static void handle_channel_custom(struct wiphy *wiphy,
1200 } 1315 }
1201 1316
1202 power_rule = &reg_rule->power_rule; 1317 power_rule = &reg_rule->power_rule;
1318 freq_range = &reg_rule->freq_range;
1319
1320 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1321 bw_flags = IEEE80211_CHAN_NO_HT40;
1203 1322
1204 chan->flags |= map_regdom_flags(reg_rule->flags); 1323 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1205 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1324 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1206 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); 1325 chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
1207 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 1326 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
1208} 1327}
1209 1328
@@ -1225,13 +1344,22 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1225 const struct ieee80211_regdomain *regd) 1344 const struct ieee80211_regdomain *regd)
1226{ 1345{
1227 enum ieee80211_band band; 1346 enum ieee80211_band band;
1347 unsigned int bands_set = 0;
1228 1348
1229 mutex_lock(&cfg80211_mutex); 1349 mutex_lock(&cfg80211_mutex);
1230 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1350 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1231 if (wiphy->bands[band]) 1351 if (!wiphy->bands[band])
1232 handle_band_custom(wiphy, band, regd); 1352 continue;
1353 handle_band_custom(wiphy, band, regd);
1354 bands_set++;
1233 } 1355 }
1234 mutex_unlock(&cfg80211_mutex); 1356 mutex_unlock(&cfg80211_mutex);
1357
1358 /*
1359 * no point in calling this if it won't have any effect
 1360 * on your device's supported bands.
1361 */
1362 WARN_ON(!bands_set);
1235} 1363}
1236EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1364EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1237 1365
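
wiphy_apply_custom_regulatory() now skips bands the wiphy never registered and warns when the call touched nothing at all, since applying a custom regdomain to a wiphy with no bands is almost certainly a driver bug. The skip-and-count shape, in a tiny illustrative form (the band enum, handler, and the assert standing in for WARN_ON are not the kernel's):

#include <assert.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

static void handle_band(void *band)
{
        (void)band;             /* would apply the custom rules to this band */
}

static void apply_to_bands(void *bands[NUM_BANDS], void (*handler)(void *band))
{
        unsigned int bands_set = 0;
        int b;

        for (b = 0; b < NUM_BANDS; b++) {
                if (!bands[b])
                        continue;       /* skip bands this device does not implement */
                handler(bands[b]);
                bands_set++;
        }

        /* Mirrors WARN_ON(!bands_set): a call that changed nothing is a bug. */
        assert(bands_set > 0);
}

int main(void)
{
        int dummy_2ghz;
        void *bands[NUM_BANDS] = { &dummy_2ghz, 0 };    /* 2 GHz only */

        apply_to_bands(bands, handle_band);
        return 0;
}
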
@@ -1552,6 +1680,13 @@ static int regulatory_hint_core(const char *alpha2)
1552 1680
1553 queue_regulatory_request(request); 1681 queue_regulatory_request(request);
1554 1682
1683 /*
1684 * This ensures last_request is populated once modules
 1685 * come in and start issuing regulatory hints and calling
 1686 * wiphy_apply_custom_regulatory().
1687 */
1688 flush_scheduled_work();
1689
1555 return 0; 1690 return 0;
1556} 1691}
1557 1692
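
The flush_scheduled_work() added to regulatory_hint_core() makes the initial core hint effectively synchronous: by the time the function returns, the queued request has been processed and last_request is populated, so modules that load afterwards and issue their own hints or call wiphy_apply_custom_regulatory() never observe a NULL last_request. The same "queue, then wait before letting later callers proceed" ordering, sketched with a worker thread (the names and the single-request model are illustrative only):

#include <pthread.h>
#include <stddef.h>

static void *last_request;              /* published by the queued work */

static void *process_hint(void *req)
{
        last_request = req;             /* what the deferred work would set */
        return NULL;
}

static int hint_core(void *request)
{
        pthread_t worker;

        if (pthread_create(&worker, NULL, process_hint, request))
                return -1;

        /*
         * Equivalent of flush_scheduled_work(): do not return until the
         * queued work has run, so later callers always see last_request.
         */
        pthread_join(worker, NULL);
        return 0;
}

int main(void)
{
        static int req;

        return hint_core(&req) || last_request != &req;
}
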
diff --git a/net/wireless/util.c b/net/wireless/util.c
index beb226e78cd7..d072bff463aa 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -4,7 +4,9 @@
4 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#include <linux/bitops.h> 6#include <linux/bitops.h>
7#include <linux/etherdevice.h>
7#include <net/cfg80211.h> 8#include <net/cfg80211.h>
9#include <net/ip.h>
8#include "core.h" 10#include "core.h"
9 11
10struct ieee80211_rate * 12struct ieee80211_rate *
@@ -181,5 +183,323 @@ int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
181 return -EINVAL; 183 return -EINVAL;
182 } 184 }
183 185
186 if (params->seq) {
187 switch (params->cipher) {
188 case WLAN_CIPHER_SUITE_WEP40:
189 case WLAN_CIPHER_SUITE_WEP104:
190 /* These ciphers do not use key sequence */
191 return -EINVAL;
192 case WLAN_CIPHER_SUITE_TKIP:
193 case WLAN_CIPHER_SUITE_CCMP:
194 case WLAN_CIPHER_SUITE_AES_CMAC:
195 if (params->seq_len != 6)
196 return -EINVAL;
197 break;
198 }
199 }
200
201 return 0;
202}
203
204/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
205/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
206const unsigned char rfc1042_header[] __aligned(2) =
207 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
208EXPORT_SYMBOL(rfc1042_header);
209
210/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
211const unsigned char bridge_tunnel_header[] __aligned(2) =
212 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
213EXPORT_SYMBOL(bridge_tunnel_header);
214
215unsigned int ieee80211_hdrlen(__le16 fc)
216{
217 unsigned int hdrlen = 24;
218
219 if (ieee80211_is_data(fc)) {
220 if (ieee80211_has_a4(fc))
221 hdrlen = 30;
222 if (ieee80211_is_data_qos(fc))
223 hdrlen += IEEE80211_QOS_CTL_LEN;
224 goto out;
225 }
226
227 if (ieee80211_is_ctl(fc)) {
228 /*
229 * ACK and CTS are 10 bytes, all others 16. To see how
230 * to get this condition consider
231 * subtype mask: 0b0000000011110000 (0x00F0)
232 * ACK subtype: 0b0000000011010000 (0x00D0)
233 * CTS subtype: 0b0000000011000000 (0x00C0)
234 * bits that matter: ^^^ (0x00E0)
235 * value of those: 0b0000000011000000 (0x00C0)
236 */
237 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
238 hdrlen = 10;
239 else
240 hdrlen = 16;
241 }
242out:
243 return hdrlen;
244}
245EXPORT_SYMBOL(ieee80211_hdrlen);
246
247unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
248{
249 const struct ieee80211_hdr *hdr =
250 (const struct ieee80211_hdr *)skb->data;
251 unsigned int hdrlen;
252
253 if (unlikely(skb->len < 10))
254 return 0;
255 hdrlen = ieee80211_hdrlen(hdr->frame_control);
256 if (unlikely(hdrlen > skb->len))
257 return 0;
258 return hdrlen;
259}
260EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
261
262int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
263{
264 int ae = meshhdr->flags & MESH_FLAGS_AE;
265 /* 7.1.3.5a.2 */
266 switch (ae) {
267 case 0:
268 return 6;
269 case 1:
270 return 12;
271 case 2:
272 return 18;
273 case 3:
274 return 24;
275 default:
276 return 6;
277 }
278}
279
280int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
281 enum nl80211_iftype iftype)
282{
283 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
284 u16 hdrlen, ethertype;
285 u8 *payload;
286 u8 dst[ETH_ALEN];
287 u8 src[ETH_ALEN] __aligned(2);
288
289 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
290 return -1;
291
292 hdrlen = ieee80211_hdrlen(hdr->frame_control);
293
294 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
295 * header
296 * IEEE 802.11 address fields:
297 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
298 * 0 0 DA SA BSSID n/a
299 * 0 1 DA BSSID SA n/a
300 * 1 0 BSSID SA DA n/a
301 * 1 1 RA TA DA SA
302 */
303 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
304 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
305
306 switch (hdr->frame_control &
307 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
308 case cpu_to_le16(IEEE80211_FCTL_TODS):
309 if (unlikely(iftype != NL80211_IFTYPE_AP &&
310 iftype != NL80211_IFTYPE_AP_VLAN))
311 return -1;
312 break;
313 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
314 if (unlikely(iftype != NL80211_IFTYPE_WDS &&
315 iftype != NL80211_IFTYPE_MESH_POINT))
316 return -1;
317 if (iftype == NL80211_IFTYPE_MESH_POINT) {
318 struct ieee80211s_hdr *meshdr =
319 (struct ieee80211s_hdr *) (skb->data + hdrlen);
320 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
321 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
322 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
323 memcpy(src, meshdr->eaddr2, ETH_ALEN);
324 }
325 }
326 break;
327 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
328 if (iftype != NL80211_IFTYPE_STATION ||
329 (is_multicast_ether_addr(dst) &&
330 !compare_ether_addr(src, addr)))
331 return -1;
332 break;
333 case cpu_to_le16(0):
334 if (iftype != NL80211_IFTYPE_ADHOC)
335 return -1;
336 break;
337 }
338
339 if (unlikely(skb->len - hdrlen < 8))
340 return -1;
341
342 payload = skb->data + hdrlen;
343 ethertype = (payload[6] << 8) | payload[7];
344
345 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
346 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
347 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
348 /* remove RFC1042 or Bridge-Tunnel encapsulation and
349 * replace EtherType */
350 skb_pull(skb, hdrlen + 6);
351 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
352 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
353 } else {
354 struct ethhdr *ehdr;
355 __be16 len;
356
357 skb_pull(skb, hdrlen);
358 len = htons(skb->len);
359 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
360 memcpy(ehdr->h_dest, dst, ETH_ALEN);
361 memcpy(ehdr->h_source, src, ETH_ALEN);
362 ehdr->h_proto = len;
363 }
364 return 0;
365}
366EXPORT_SYMBOL(ieee80211_data_to_8023);
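
/*
 * The address table in the comment above is the core of
 * ieee80211_data_to_8023(): the ToDS/FromDS bits decide which of the four
 * 802.11 address slots become the Ethernet destination and source before
 * the LLC/SNAP header is stripped (RFC1042 or bridge-tunnel) or an 802.3
 * length field is synthesised. Below is a small stand-alone rendering of
 * just the address selection; the structures and helper are illustrative,
 * not the cfg80211 API.
 */

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct hdr80211 {
        unsigned char addr1[ETH_ALEN];  /* RA */
        unsigned char addr2[ETH_ALEN];  /* TA */
        unsigned char addr3[ETH_ALEN];
        unsigned char addr4[ETH_ALEN];  /* only present when ToDS and FromDS */
};

/*
 * ToDS FromDS  Addr1  Addr2  Addr3  Addr4
 *  0     0      DA     SA    BSSID   -
 *  0     1      DA    BSSID   SA     -
 *  1     0     BSSID   SA     DA     -
 *  1     1      RA     TA     DA     SA
 */
static void pick_da_sa(const struct hdr80211 *h, int to_ds, int from_ds,
                       unsigned char da[ETH_ALEN], unsigned char sa[ETH_ALEN])
{
        if (!to_ds && !from_ds) {
                memcpy(da, h->addr1, ETH_ALEN);
                memcpy(sa, h->addr2, ETH_ALEN);
        } else if (!to_ds && from_ds) {
                memcpy(da, h->addr1, ETH_ALEN);
                memcpy(sa, h->addr3, ETH_ALEN);
        } else if (to_ds && !from_ds) {
                memcpy(da, h->addr3, ETH_ALEN);
                memcpy(sa, h->addr2, ETH_ALEN);
        } else {
                memcpy(da, h->addr3, ETH_ALEN);
                memcpy(sa, h->addr4, ETH_ALEN);
        }
}

int main(void)
{
        struct hdr80211 h;
        unsigned char da[ETH_ALEN], sa[ETH_ALEN];

        memset(&h, 0, sizeof(h));
        memset(h.addr3, 0x11, ETH_ALEN);        /* DA when ToDS=1, FromDS=0 */
        memset(h.addr2, 0x22, ETH_ALEN);        /* SA in the same case */

        pick_da_sa(&h, 1, 0, da, sa);
        printf("da starts with %02x, sa with %02x\n", da[0], sa[0]);
        return 0;
}
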
367
368int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
369 enum nl80211_iftype iftype, u8 *bssid, bool qos)
370{
371 struct ieee80211_hdr hdr;
372 u16 hdrlen, ethertype;
373 __le16 fc;
374 const u8 *encaps_data;
375 int encaps_len, skip_header_bytes;
376 int nh_pos, h_pos;
377 int head_need;
378
379 if (unlikely(skb->len < ETH_HLEN))
380 return -EINVAL;
381
382 nh_pos = skb_network_header(skb) - skb->data;
383 h_pos = skb_transport_header(skb) - skb->data;
384
385 /* convert Ethernet header to proper 802.11 header (based on
386 * operation mode) */
387 ethertype = (skb->data[12] << 8) | skb->data[13];
388 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
389
390 switch (iftype) {
391 case NL80211_IFTYPE_AP:
392 case NL80211_IFTYPE_AP_VLAN:
393 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
394 /* DA BSSID SA */
395 memcpy(hdr.addr1, skb->data, ETH_ALEN);
396 memcpy(hdr.addr2, addr, ETH_ALEN);
397 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
398 hdrlen = 24;
399 break;
400 case NL80211_IFTYPE_STATION:
401 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
402 /* BSSID SA DA */
403 memcpy(hdr.addr1, bssid, ETH_ALEN);
404 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
405 memcpy(hdr.addr3, skb->data, ETH_ALEN);
406 hdrlen = 24;
407 break;
408 case NL80211_IFTYPE_ADHOC:
409 /* DA SA BSSID */
410 memcpy(hdr.addr1, skb->data, ETH_ALEN);
411 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
412 memcpy(hdr.addr3, bssid, ETH_ALEN);
413 hdrlen = 24;
414 break;
415 default:
416 return -EOPNOTSUPP;
417 }
418
419 if (qos) {
420 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
421 hdrlen += 2;
422 }
423
424 hdr.frame_control = fc;
425 hdr.duration_id = 0;
426 hdr.seq_ctrl = 0;
427
428 skip_header_bytes = ETH_HLEN;
429 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
430 encaps_data = bridge_tunnel_header;
431 encaps_len = sizeof(bridge_tunnel_header);
432 skip_header_bytes -= 2;
433 } else if (ethertype > 0x600) {
434 encaps_data = rfc1042_header;
435 encaps_len = sizeof(rfc1042_header);
436 skip_header_bytes -= 2;
437 } else {
438 encaps_data = NULL;
439 encaps_len = 0;
440 }
441
442 skb_pull(skb, skip_header_bytes);
443 nh_pos -= skip_header_bytes;
444 h_pos -= skip_header_bytes;
445
446 head_need = hdrlen + encaps_len - skb_headroom(skb);
447
448 if (head_need > 0 || skb_cloned(skb)) {
449 head_need = max(head_need, 0);
450 if (head_need)
451 skb_orphan(skb);
452
453 if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
454 printk(KERN_ERR "failed to reallocate Tx buffer\n");
455 return -ENOMEM;
456 }
457 skb->truesize += head_need;
458 }
459
460 if (encaps_data) {
461 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
462 nh_pos += encaps_len;
463 h_pos += encaps_len;
464 }
465
466 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
467
468 nh_pos += hdrlen;
469 h_pos += hdrlen;
470
471 /* Update skb pointers to various headers since this modified frame
472 * is going to go through Linux networking code that may potentially
 473 * need things like a pointer to the IP header. */
474 skb_set_mac_header(skb, 0);
475 skb_set_network_header(skb, nh_pos);
476 skb_set_transport_header(skb, h_pos);
477
184 return 0; 478 return 0;
185} 479}
480EXPORT_SYMBOL(ieee80211_data_from_8023);
481
482/* Given a data frame determine the 802.1p/1d tag to use. */
483unsigned int cfg80211_classify8021d(struct sk_buff *skb)
484{
485 unsigned int dscp;
486
487 /* skb->priority values from 256->263 are magic values to
488 * directly indicate a specific 802.1d priority. This is used
489 * to allow 802.1d priority to be passed directly in from VLAN
490 * tags, etc.
491 */
492 if (skb->priority >= 256 && skb->priority <= 263)
493 return skb->priority - 256;
494
495 switch (skb->protocol) {
496 case htons(ETH_P_IP):
497 dscp = ip_hdr(skb)->tos & 0xfc;
498 break;
499 default:
500 return 0;
501 }
502
503 return dscp >> 5;
504}
505EXPORT_SYMBOL(cfg80211_classify8021d);
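
cfg80211_classify8021d() resolves the 802.1d user priority in two steps: skb->priority values 256..263 are taken literally (minus 256), otherwise IPv4 traffic is classified from the top DSCP bits of the TOS byte and everything else falls back to best effort. The same mapping as a runnable userspace function, with plain integers standing in for the skb fields:

#include <assert.h>

/* 802.1d user priority from an explicit priority tag or the IPv4 TOS byte. */
static unsigned int classify8021d(unsigned int priority, int is_ipv4,
                                  unsigned int tos)
{
        /* 256..263 mean "use this 802.1d priority directly" (e.g. from VLAN tags). */
        if (priority >= 256 && priority <= 263)
                return priority - 256;

        if (!is_ipv4)
                return 0;               /* unknown protocol: best effort */

        return (tos & 0xfc) >> 5;       /* top three DSCP bits */
}

int main(void)
{
        assert(classify8021d(260, 0, 0) == 4);  /* explicit priority tag */
        assert(classify8021d(0, 1, 0xb8) == 5); /* DSCP EF (46) maps to UP 5 */
        assert(classify8021d(0, 0, 0) == 0);    /* non-IP falls back to 0 */
        return 0;
}
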
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index f98090b90fbf..711e00a0c9b5 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -504,6 +504,13 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
504 else if (idx == wdev->wext.default_mgmt_key) 504 else if (idx == wdev->wext.default_mgmt_key)
505 wdev->wext.default_mgmt_key = -1; 505 wdev->wext.default_mgmt_key = -1;
506 } 506 }
507 /*
508 * Applications using wireless extensions expect to be
509 * able to delete keys that don't exist, so allow that.
510 */
511 if (err == -ENOENT)
512 return 0;
513
507 return err; 514 return err;
508 } else { 515 } else {
509 if (addr) 516 if (addr)
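
The -ENOENT special case above exists because wireless-extensions userspace routinely deletes key indexes that were never configured and expects that to succeed. A one-function sketch of that translation, with del_key_cb standing in for the driver's del_key operation:

#include <errno.h>

/* Deleting a key that was never set is not an error for wext userspace. */
static int wext_del_key(int (*del_key_cb)(int idx), int idx)
{
        int err = del_key_cb(idx);

        return err == -ENOENT ? 0 : err;
}

static int fake_del(int idx)
{
        (void)idx;
        return -ENOENT;         /* driver says the key does not exist */
}

int main(void)
{
        return wext_del_key(fake_del, 3);       /* treated as success: returns 0 */
}
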
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index d3bbef70cc7c..252c2010c2e2 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -636,8 +636,10 @@ static void wireless_seq_printf_stats(struct seq_file *seq,
636/* 636/*
637 * Print info for /proc/net/wireless (print all entries) 637 * Print info for /proc/net/wireless (print all entries)
638 */ 638 */
639static int wireless_seq_show(struct seq_file *seq, void *v) 639static int wireless_dev_seq_show(struct seq_file *seq, void *v)
640{ 640{
641 might_sleep();
642
641 if (v == SEQ_START_TOKEN) 643 if (v == SEQ_START_TOKEN)
642 seq_printf(seq, "Inter-| sta-| Quality | Discarded " 644 seq_printf(seq, "Inter-| sta-| Quality | Discarded "
643 "packets | Missed | WE\n" 645 "packets | Missed | WE\n"
@@ -651,21 +653,41 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
651 653
652static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) 654static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos)
653{ 655{
656 struct net *net = seq_file_net(seq);
657 loff_t off;
658 struct net_device *dev;
659
654 rtnl_lock(); 660 rtnl_lock();
655 return dev_seq_start(seq, pos); 661 if (!*pos)
662 return SEQ_START_TOKEN;
663
664 off = 1;
665 for_each_netdev(net, dev)
666 if (off++ == *pos)
667 return dev;
668 return NULL;
669}
670
671static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
672{
673 struct net *net = seq_file_net(seq);
674
675 ++*pos;
676
677 return v == SEQ_START_TOKEN ?
678 first_net_device(net) : next_net_device(v);
656} 679}
657 680
658static void wireless_dev_seq_stop(struct seq_file *seq, void *v) 681static void wireless_dev_seq_stop(struct seq_file *seq, void *v)
659{ 682{
660 dev_seq_stop(seq, v);
661 rtnl_unlock(); 683 rtnl_unlock();
662} 684}
663 685
664static const struct seq_operations wireless_seq_ops = { 686static const struct seq_operations wireless_seq_ops = {
665 .start = wireless_dev_seq_start, 687 .start = wireless_dev_seq_start,
666 .next = dev_seq_next, 688 .next = wireless_dev_seq_next,
667 .stop = wireless_dev_seq_stop, 689 .stop = wireless_dev_seq_stop,
668 .show = wireless_seq_show, 690 .show = wireless_dev_seq_show,
669}; 691};
670 692
671static int seq_open_wireless(struct inode *inode, struct file *file) 693static int seq_open_wireless(struct inode *inode, struct file *file)
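
The rewritten /proc/net/wireless iterator is the standard seq_file pattern: start() takes the RTNL and returns SEQ_START_TOKEN at position 0 or walks to the pos-th device, next() advances (treating the token as "before the first device"), stop() drops the lock, and show() prints the header for the token and one line per device. A userspace model of that start/next/show walk over a linked list (the list node, the omitted locking, and the printing are illustrative):

#include <stdio.h>
#include <stddef.h>

struct dev { const char *name; struct dev *next; };

#define SEQ_START_TOKEN ((void *)1)

static struct dev *dev_list;

static void *seq_start(long long *pos)
{
        struct dev *d;
        long long off = 1;

        /* rtnl_lock() would be taken here in the kernel version */
        if (!*pos)
                return SEQ_START_TOKEN;

        for (d = dev_list; d; d = d->next)
                if (off++ == *pos)
                        return d;
        return NULL;
}

static void *seq_next(void *v, long long *pos)
{
        struct dev *d = v;

        ++*pos;
        return v == SEQ_START_TOKEN ? dev_list : d->next;
}

static void seq_show(void *v)
{
        if (v == SEQ_START_TOKEN)
                printf("Inter-| sta-| Quality ...\n");
        else
                printf("%s: ...\n", ((struct dev *)v)->name);
}

int main(void)
{
        struct dev b = { "wlan1", NULL }, a = { "wlan0", &b };
        long long pos = 0;
        void *v;

        dev_list = &a;
        for (v = seq_start(&pos); v; v = seq_next(v, &pos))
                seq_show(v);
        return 0;
}
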
@@ -798,6 +820,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
798 err = -EFAULT; 820 err = -EFAULT;
799 goto out; 821 goto out;
800 } 822 }
823
824 if (cmd == SIOCSIWENCODEEXT) {
825 struct iw_encode_ext *ee = (void *) extra;
826
827 if (iwp->length < sizeof(*ee) + ee->key_len)
828 return -EFAULT;
829 }
801 } 830 }
802 831
803 err = handler(dev, info, (union iwreq_data *) iwp, extra); 832 err = handler(dev, info, (union iwreq_data *) iwp, extra);
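
The new SIOCSIWENCODEEXT check rejects requests whose declared length cannot hold struct iw_encode_ext plus the key length it claims, so handlers never read past the data actually copied in from userspace. A minimal illustration of validating such a length-prefixed, variable-size request before use (the struct layout and sizes are simplified, not the exact wext ABI):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct encode_ext {             /* simplified stand-in for struct iw_encode_ext */
        uint16_t alg;
        uint16_t key_len;
        uint8_t key[];          /* key_len bytes follow the fixed header */
};

static int check_encode_ext(const void *buf, size_t buf_len)
{
        const struct encode_ext *ee = buf;

        /* The fixed header must fit before key_len can be trusted at all... */
        if (buf_len < sizeof(*ee))
                return -EFAULT;

        /* ...and the buffer must cover the key it claims to carry. */
        if (buf_len < sizeof(*ee) + ee->key_len)
                return -EFAULT;

        return 0;
}

int main(void)
{
        size_t len = sizeof(struct encode_ext) + 13;    /* e.g. a WEP-104 key */
        struct encode_ext *ee = calloc(1, len);
        int ret;

        if (!ee)
                return 1;
        ee->key_len = 13;
        ret = check_encode_ext(ee, len);        /* 0: length is consistent */
        free(ee);
        return ret ? 1 : 0;
}
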