author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-15 19:50:46 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-15 19:50:46 -0400
commit     d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (patch)
tree       e1d0195704efaafa14caf6965c8f2b6b00cbcb83 /net
parent     d2a9a8ded48bec153f08ee87a40626c8d0737f79 (diff)
parent     0a9f2a467d8dacaf7e97469dba99ed2d07287d80 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (53 commits)
  [TCP]: Verify the presence of RETRANS bit when leaving FRTO
  [IPV6]: Call inet6addr_chain notifiers on link down
  [NET_SCHED]: Kill CONFIG_NET_CLS_POLICE
  [NET_SCHED]: act_api: qdisc internal reclassify support
  [NET_SCHED]: sch_dsmark: act_api support
  [NET_SCHED]: sch_atm: act_api support
  [NET_SCHED]: sch_atm: Lindent
  [IPV6]: MSG_ERRQUEUE messages do not pass to connected raw sockets
  [IPV4]: Cleanup call to __neigh_lookup()
  [NET_SCHED]: Revert "avoid transmit softirq on watchdog wakeup" optimization
  [NETFILTER]: nf_conntrack: UDPLITE support
  [NETFILTER]: nf_conntrack: mark protocols __read_mostly
  [NETFILTER]: x_tables: add connlimit match
  [NETFILTER]: Lower *tables printk severity
  [NETFILTER]: nf_conntrack: Don't track locally generated special ICMP error
  [NETFILTER]: nf_conntrack: Introduces nf_ct_get_tuplepr and uses it
  [NETFILTER]: nf_conntrack: make l3proto->prepare() generic and renames it
  [NETFILTER]: nf_conntrack: Increment error count on parsing IPv4 header
  [NET]: Add ethtool support for NETIF_F_IPV6_CSUM devices.
  [AF_IUCV]: Add lock when updating accept_q
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan.h | 1
-rw-r--r--  net/8021q/vlan_dev.c | 167
-rw-r--r--  net/bridge/netfilter/ebtables.c | 4
-rw-r--r--  net/core/dev.c | 43
-rw-r--r--  net/core/dev_mcast.c | 75
-rw-r--r--  net/core/ethtool.c | 12
-rw-r--r--  net/ipv4/arp.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 5
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 25
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 57
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_probe.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 1
-rw-r--r--  net/ipv6/icmp.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 31
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 46
-rw-r--r--  net/iucv/Kconfig | 8
-rw-r--r--  net/iucv/af_iucv.c | 16
-rw-r--r--  net/iucv/iucv.c | 5
-rw-r--r--  net/mac80211/debugfs_netdev.c | 2
-rw-r--r--  net/mac80211/hostapd_ioctl.h | 8
-rw-r--r--  net/mac80211/ieee80211.c | 449
-rw-r--r--  net/mac80211/ieee80211_common.h | 9
-rw-r--r--  net/mac80211/ieee80211_i.h | 14
-rw-r--r--  net/mac80211/ieee80211_iface.c | 3
-rw-r--r--  net/mac80211/ieee80211_ioctl.c | 240
-rw-r--r--  net/mac80211/ieee80211_sta.c | 98
-rw-r--r--  net/mac80211/rc80211_simple.c | 8
-rw-r--r--  net/netfilter/Kconfig | 17
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 37
-rw-r--r--  net/netfilter/nf_conntrack_l3proto_generic.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 266
-rw-r--r--  net/netfilter/xt_connlimit.c | 313
-rw-r--r--  net/rfkill/rfkill-input.c | 2
-rw-r--r--  net/sched/Kconfig | 8
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_police.c | 246
-rw-r--r--  net/sched/cls_api.c | 40
-rw-r--r--  net/sched/cls_u32.c | 3
-rw-r--r--  net/sched/sch_api.c | 73
-rw-r--r--  net/sched/sch_atm.c | 475
-rw-r--r--  net/sched/sch_cbq.c | 48
-rw-r--r--  net/sched/sch_dsmark.c | 34
-rw-r--r--  net/sched/sch_hfsc.c | 3
-rw-r--r--  net/sched/sch_htb.c | 3
-rw-r--r--  net/sched/sch_ingress.c | 19
-rw-r--r--  net/sched/sch_tbf.c | 2
-rw-r--r--  net/wireless/Makefile | 2
-rw-r--r--  net/wireless/radiotap.c | 257
58 files changed, 1993 insertions, 1226 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index abb9900edb3f..cda936b77d22 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -373,10 +373,11 @@ void vlan_setup(struct net_device *new_dev)
 new_dev->open = vlan_dev_open;
 new_dev->stop = vlan_dev_stop;
 new_dev->set_multicast_list = vlan_dev_set_multicast_list;
+new_dev->change_rx_flags = vlan_change_rx_flags;
 new_dev->destructor = free_netdev;
 new_dev->do_ioctl = vlan_dev_ioctl;
 
-memset(new_dev->broadcast, 0, sizeof(ETH_ALEN));
+memset(new_dev->broadcast, 0, ETH_ALEN);
 }
 
 static void vlan_transfer_operstate(const struct net_device *dev, struct net_device *vlandev)
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 62ce1c519aab..7df5b2935579 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -69,6 +69,7 @@ int vlan_dev_set_vlan_flag(const struct net_device *dev,
 u32 flag, short flag_val);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result);
+void vlan_change_rx_flags(struct net_device *dev, int change);
 void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
 
 int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index d4a62d1b52b4..4d2aa4dd42ac 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -612,90 +612,6 @@ void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result)
 *result = VLAN_DEV_INFO(dev)->vlan_id;
 }
 
-static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
-struct dev_mc_list *dmi2)
-{
-return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
-(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
-}
-
-/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
- * an entire list, and we'll iterate through it.
- */
-static int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
-{
-struct dev_mc_list *idmi;
-
-for (idmi = mc_list; idmi != NULL; ) {
-if (vlan_dmi_equals(dmi, idmi)) {
-if (dmi->dmi_users > idmi->dmi_users)
-return 1;
-else
-return 0;
-} else {
-idmi = idmi->next;
-}
-}
-
-return 1;
-}
-
-static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
-{
-struct dev_mc_list *dmi = mc_list;
-struct dev_mc_list *next;
-
-while(dmi) {
-next = dmi->next;
-kfree(dmi);
-dmi = next;
-}
-}
-
-static void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
-{
-struct dev_mc_list *dmi, *new_dmi;
-
-vlan_destroy_mc_list(vlan_info->old_mc_list);
-vlan_info->old_mc_list = NULL;
-
-for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
-new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
-if (new_dmi == NULL) {
-printk(KERN_ERR "vlan: cannot allocate memory. "
-"Multicast may not work properly from now.\n");
-return;
-}
-
-/* Copy whole structure, then make new 'next' pointer */
-*new_dmi = *dmi;
-new_dmi->next = vlan_info->old_mc_list;
-vlan_info->old_mc_list = new_dmi;
-}
-}
-
-static void vlan_flush_mc_list(struct net_device *dev)
-{
-struct dev_mc_list *dmi = dev->mc_list;
-
-while (dmi) {
-printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
-dev->name,
-dmi->dmi_addr[0],
-dmi->dmi_addr[1],
-dmi->dmi_addr[2],
-dmi->dmi_addr[3],
-dmi->dmi_addr[4],
-dmi->dmi_addr[5]);
-dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
-dmi = dev->mc_list;
-}
-
-/* dev->mc_list is NULL by the time we get here. */
-vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
-VLAN_DEV_INFO(dev)->old_mc_list = NULL;
-}
-
 int vlan_dev_open(struct net_device *dev)
 {
 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
@@ -712,6 +628,11 @@ int vlan_dev_open(struct net_device *dev)
 }
 memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
 
+if (dev->flags & IFF_ALLMULTI)
+dev_set_allmulti(real_dev, 1);
+if (dev->flags & IFF_PROMISC)
+dev_set_promiscuity(real_dev, 1);
+
 return 0;
 }
 
@@ -719,7 +640,11 @@ int vlan_dev_stop(struct net_device *dev)
 {
 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
 
-vlan_flush_mc_list(dev);
+dev_mc_unsync(real_dev, dev);
+if (dev->flags & IFF_ALLMULTI)
+dev_set_allmulti(real_dev, -1);
+if (dev->flags & IFF_PROMISC)
+dev_set_promiscuity(real_dev, -1);
 
 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
@@ -754,68 +679,18 @@ int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 return err;
 }
 
-/** Taken from Gleb + Lennert's VLAN code, and modified... */
-void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
+void vlan_change_rx_flags(struct net_device *dev, int change)
 {
-struct dev_mc_list *dmi;
-struct net_device *real_dev;
-int inc;
-
-if (vlan_dev && (vlan_dev->priv_flags & IFF_802_1Q_VLAN)) {
-/* Then it's a real vlan device, as far as we can tell.. */
-real_dev = VLAN_DEV_INFO(vlan_dev)->real_dev;
-
-/* compare the current promiscuity to the last promisc we had.. */
-inc = vlan_dev->promiscuity - VLAN_DEV_INFO(vlan_dev)->old_promiscuity;
-if (inc) {
-printk(KERN_INFO "%s: dev_set_promiscuity(master, %d)\n",
-vlan_dev->name, inc);
-dev_set_promiscuity(real_dev, inc); /* found in dev.c */
-VLAN_DEV_INFO(vlan_dev)->old_promiscuity = vlan_dev->promiscuity;
-}
-
-inc = vlan_dev->allmulti - VLAN_DEV_INFO(vlan_dev)->old_allmulti;
-if (inc) {
-printk(KERN_INFO "%s: dev_set_allmulti(master, %d)\n",
-vlan_dev->name, inc);
-dev_set_allmulti(real_dev, inc); /* dev.c */
-VLAN_DEV_INFO(vlan_dev)->old_allmulti = vlan_dev->allmulti;
-}
-
-/* looking for addresses to add to master's list */
-for (dmi = vlan_dev->mc_list; dmi != NULL; dmi = dmi->next) {
-if (vlan_should_add_mc(dmi, VLAN_DEV_INFO(vlan_dev)->old_mc_list)) {
-dev_mc_add(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
-printk(KERN_DEBUG "%s: add %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address to master interface\n",
-vlan_dev->name,
-dmi->dmi_addr[0],
-dmi->dmi_addr[1],
-dmi->dmi_addr[2],
-dmi->dmi_addr[3],
-dmi->dmi_addr[4],
-dmi->dmi_addr[5]);
-}
-}
+struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
 
-/* looking for addresses to delete from master's list */
-for (dmi = VLAN_DEV_INFO(vlan_dev)->old_mc_list; dmi != NULL; dmi = dmi->next) {
-if (vlan_should_add_mc(dmi, vlan_dev->mc_list)) {
-/* if we think we should add it to the new list, then we should really
- * delete it from the real list on the underlying device.
- */
-dev_mc_delete(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
-printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from master interface\n",
-vlan_dev->name,
-dmi->dmi_addr[0],
-dmi->dmi_addr[1],
-dmi->dmi_addr[2],
-dmi->dmi_addr[3],
-dmi->dmi_addr[4],
-dmi->dmi_addr[5]);
-}
-}
+if (change & IFF_ALLMULTI)
+dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+if (change & IFF_PROMISC)
+dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+}
 
-/* save multicast list */
-vlan_copy_mc_list(vlan_dev->mc_list, VLAN_DEV_INFO(vlan_dev));
-}
+/** Taken from Gleb + Lennert's VLAN code, and modified... */
+void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
+{
+dev_mc_sync(VLAN_DEV_INFO(vlan_dev)->real_dev, vlan_dev);
 }
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index ac9984f98e59..4169a2a89a39 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1525,14 +1525,14 @@ static int __init ebtables_init(void)
 if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
 return ret;
 
-printk(KERN_NOTICE "Ebtables v2.0 registered\n");
+printk(KERN_INFO "Ebtables v2.0 registered\n");
 return 0;
 }
 
 static void __exit ebtables_fini(void)
 {
 nf_unregister_sockopt(&ebt_sockopts);
-printk(KERN_NOTICE "Ebtables v2.0 unregistered\n");
+printk(KERN_INFO "Ebtables v2.0 unregistered\n");
 }
 
 EXPORT_SYMBOL(ebt_register_table);
diff --git a/net/core/dev.c b/net/core/dev.c
index 96443055324e..13a0d9f6da54 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,6 +98,7 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -1813,6 +1814,28 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
 #endif
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
+
+static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
+struct packet_type **pt_prev,
+int *ret,
+struct net_device *orig_dev)
+{
+if (skb->dev->macvlan_port == NULL)
+return skb;
+
+if (*pt_prev) {
+*ret = deliver_skb(skb, *pt_prev, orig_dev);
+*pt_prev = NULL;
+}
+return macvlan_handle_frame_hook(skb);
+}
+#else
+#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
+#endif
+
 #ifdef CONFIG_NET_CLS_ACT
 /* TODO: Maybe we should just force sch_ingress to be compiled in
  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -1920,6 +1943,9 @@ ncls:
 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
 if (!skb)
 goto out;
+skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
+if (!skb)
+goto out;
 
 type = skb->protocol;
 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
@@ -2521,6 +2547,8 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
 {
 unsigned short old_flags = dev->flags;
 
+ASSERT_RTNL();
+
 if ((dev->promiscuity += inc) == 0)
 dev->flags &= ~IFF_PROMISC;
 else
@@ -2535,6 +2563,9 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
 dev->name, (dev->flags & IFF_PROMISC),
 (old_flags & IFF_PROMISC),
 audit_get_loginuid(current->audit_context));
+
+if (dev->change_rx_flags)
+dev->change_rx_flags(dev, IFF_PROMISC);
 }
 }
 
@@ -2573,11 +2604,16 @@ void dev_set_allmulti(struct net_device *dev, int inc)
 {
 unsigned short old_flags = dev->flags;
 
+ASSERT_RTNL();
+
 dev->flags |= IFF_ALLMULTI;
 if ((dev->allmulti += inc) == 0)
 dev->flags &= ~IFF_ALLMULTI;
-if (dev->flags ^ old_flags)
+if (dev->flags ^ old_flags) {
+if (dev->change_rx_flags)
+dev->change_rx_flags(dev, IFF_ALLMULTI);
 dev_set_rx_mode(dev);
+}
 }
 
 /*
@@ -2778,6 +2814,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 int ret, changes;
 int old_flags = dev->flags;
 
+ASSERT_RTNL();
+
 /*
  * Set the flags on our device.
  */
@@ -2792,6 +2830,9 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
  * Load in the correct multicast list now the flags have changed.
  */
 
+if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
+dev->change_rx_flags(dev, IFF_MULTICAST);
+
 dev_set_rx_mode(dev);
 
 /*
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index aa38100601fb..235a2a8a0d05 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -102,6 +102,81 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 return err;
 }
 
+/**
+ * dev_mc_sync - Synchronize device's multicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_multicast_list
+ * function of layered software devices.
+ */
+int dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+struct dev_addr_list *da;
+int err = 0;
+
+netif_tx_lock_bh(to);
+for (da = from->mc_list; da != NULL; da = da->next) {
+if (!da->da_synced) {
+err = __dev_addr_add(&to->mc_list, &to->mc_count,
+da->da_addr, da->da_addrlen, 0);
+if (err < 0)
+break;
+da->da_synced = 1;
+da->da_users++;
+} else if (da->da_users == 1) {
+__dev_addr_delete(&to->mc_list, &to->mc_count,
+da->da_addr, da->da_addrlen, 0);
+__dev_addr_delete(&from->mc_list, &from->mc_count,
+da->da_addr, da->da_addrlen, 0);
+}
+}
+if (!err)
+__dev_set_rx_mode(to);
+netif_tx_unlock_bh(to);
+
+return err;
+}
+EXPORT_SYMBOL(dev_mc_sync);
+
+
+/**
+ * dev_mc_unsync - Remove synchronized addresses from the destination
+ * device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_mc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_mc_unsync(struct net_device *to, struct net_device *from)
+{
+struct dev_addr_list *da;
+
+netif_tx_lock_bh(from);
+netif_tx_lock_bh(to);
+
+for (da = from->mc_list; da != NULL; da = da->next) {
+if (!da->da_synced)
+continue;
+__dev_addr_delete(&to->mc_list, &to->mc_count,
+da->da_addr, da->da_addrlen, 0);
+da->da_synced = 0;
+__dev_addr_delete(&from->mc_list, &from->mc_count,
+da->da_addr, da->da_addrlen, 0);
+}
+__dev_set_rx_mode(to);
+
+netif_tx_unlock_bh(to);
+netif_tx_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_mc_unsync);
+
 /*
  * Discard multicast list when a device is downed
  */
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 8d5e5a09b576..0b531e98ec33 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -52,6 +52,17 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
 
 return 0;
 }
+
+int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
+{
+if (data)
+dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+else
+dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+return 0;
+}
+
 u32 ethtool_op_get_sg(struct net_device *dev)
 {
 return (dev->features & NETIF_F_SG) != 0;
@@ -980,5 +991,6 @@ EXPORT_SYMBOL(ethtool_op_set_sg);
 EXPORT_SYMBOL(ethtool_op_set_tso);
 EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
 EXPORT_SYMBOL(ethtool_op_set_ufo);
 EXPORT_SYMBOL(ethtool_op_get_ufo);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e00767e8ebd9..9ab9d534fbac 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -885,7 +885,7 @@ static int arp_process(struct sk_buff *skb)
 if (n == NULL &&
 arp->ar_op == htons(ARPOP_REPLY) &&
 inet_addr_type(sip) == RTN_UNICAST)
-n = __neigh_lookup(&arp_tbl, &sip, dev, -1);
+n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 }
 
 if (n) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index a73cf93cee36..2586df09b9b6 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -14,7 +14,8 @@
 #include <net/ip.h>
 
 /* Must be called with locally disabled BHs. */
-void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
+static void __inet_twsk_kill(struct inet_timewait_sock *tw,
+struct inet_hashinfo *hashinfo)
 {
 struct inet_bind_hashbucket *bhead;
 struct inet_bind_bucket *tb;
@@ -47,8 +48,6 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi
 inet_twsk_put(tw);
 }
 
-EXPORT_SYMBOL_GPL(__inet_twsk_kill);
-
 /*
  * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e981232942a1..d1149aba9351 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1184,7 +1184,7 @@ static int __init arp_tables_init(void)
 if (ret < 0)
 goto err4;
 
-printk("arp_tables: (C) 2002 David S. Miller\n");
+printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
 return 0;
 
 err4:
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 3c5629938487..64552afd01cb 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -78,21 +78,26 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 return skb;
 }
 
-static int
-ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
-u_int8_t *protonum)
+static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+unsigned int *dataoff, u_int8_t *protonum)
 {
+struct iphdr _iph, *iph;
+
+iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+if (iph == NULL)
+return -NF_DROP;
+
 /* Never happen */
-if (ip_hdr(*pskb)->frag_off & htons(IP_OFFSET)) {
+if (iph->frag_off & htons(IP_OFFSET)) {
 if (net_ratelimit()) {
-printk(KERN_ERR "ipv4_prepare: Frag of proto %u (hook=%u)\n",
-ip_hdr(*pskb)->protocol, hooknum);
+printk(KERN_ERR "ipv4_get_l4proto: Frag of proto %u\n",
+iph->protocol);
 }
 return -NF_DROP;
 }
 
-*dataoff = skb_network_offset(*pskb) + ip_hdrlen(*pskb);
-*protonum = ip_hdr(*pskb)->protocol;
+*dataoff = nhoff + (iph->ihl << 2);
+*protonum = iph->protocol;
 
 return NF_ACCEPT;
 }
@@ -400,14 +405,14 @@ static struct nf_sockopt_ops so_getorigdst = {
 .get = &getorigdst,
 };
 
-struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
 .l3proto = PF_INET,
 .name = "ipv4",
 .pkt_to_tuple = ipv4_pkt_to_tuple,
 .invert_tuple = ipv4_invert_tuple,
 .print_tuple = ipv4_print_tuple,
 .print_conntrack = ipv4_print_conntrack,
-.prepare = ipv4_prepare,
+.get_l4proto = ipv4_get_l4proto,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
 .tuple_to_nfattr = ipv4_tuple_to_nfattr,
 .nfattr_to_tuple = ipv4_nfattr_to_tuple,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 0fe8fb0466ef..6593fd2c5b10 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -136,40 +136,22 @@ icmp_error_message(struct sk_buff *skb,
 unsigned int hooknum)
 {
 struct nf_conntrack_tuple innertuple, origtuple;
-struct {
-struct icmphdr icmp;
-struct iphdr ip;
-} _in, *inside;
 struct nf_conntrack_l4proto *innerproto;
 struct nf_conntrack_tuple_hash *h;
-int dataoff;
 
 NF_CT_ASSERT(skb->nfct == NULL);
 
-/* Not enough header? */
-inside = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in);
-if (inside == NULL)
-return -NF_ACCEPT;
-
-/* Ignore ICMP's containing fragments (shouldn't happen) */
-if (inside->ip.frag_off & htons(IP_OFFSET)) {
-pr_debug("icmp_error_message: fragment of proto %u\n",
-inside->ip.protocol);
+/* Are they talking about one of our connections? */
+if (!nf_ct_get_tuplepr(skb,
+skb_network_offset(skb) + ip_hdrlen(skb)
++ sizeof(struct icmphdr),
+PF_INET, &origtuple)) {
+pr_debug("icmp_error_message: failed to get tuple\n");
 return -NF_ACCEPT;
 }
 
 /* rcu_read_lock()ed by nf_hook_slow */
-innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
-
-dataoff = ip_hdrlen(skb) + sizeof(inside->icmp);
-/* Are they talking about one of our connections? */
-if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
-inside->ip.protocol, &origtuple,
-&nf_conntrack_l3proto_ipv4, innerproto)) {
-pr_debug("icmp_error_message: ! get_tuple p=%u",
-inside->ip.protocol);
-return -NF_ACCEPT;
-}
+innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum);
 
 /* Ordinarily, we'd expect the inverted tupleproto, but it's
 been preserved inside the ICMP. */
@@ -183,25 +165,13 @@ icmp_error_message(struct sk_buff *skb,
 
 h = nf_conntrack_find_get(&innertuple);
 if (!h) {
-/* Locally generated ICMPs will match inverted if they
-haven't been SNAT'ed yet */
-/* FIXME: NAT code has to handle half-done double NAT --RR */
-if (hooknum == NF_IP_LOCAL_OUT)
-h = nf_conntrack_find_get(&origtuple);
-
-if (!h) {
-pr_debug("icmp_error_message: no match\n");
-return -NF_ACCEPT;
-}
-
-/* Reverse direction from that found */
-if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-*ctinfo += IP_CT_IS_REPLY;
-} else {
-if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-*ctinfo += IP_CT_IS_REPLY;
+pr_debug("icmp_error_message: no match\n");
+return -NF_ACCEPT;
 }
 
+if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+*ctinfo += IP_CT_IS_REPLY;
+
 /* Update skb to refer to this connection */
 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
 skb->nfctinfo = *ctinfo;
@@ -342,7 +312,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
 #endif /* CONFIG_SYSCTL */
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
+struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
 {
 .l3proto = PF_INET,
 .l4proto = IPPROTO_ICMP,
@@ -368,4 +338,3 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
 #endif
 #endif
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_icmp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 69f9f1ef3ef6..4e5884ac8f29 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1398,7 +1398,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 * waiting for the first ACK and did not get it)...
 */
 if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
-tp->retrans_out += tcp_skb_pcount(skb);
+/* For some reason this R-bit might get cleared? */
+if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+tp->retrans_out += tcp_skb_pcount(skb);
 /* ...enter this if branch just for the first segment */
 flag |= FLAG_DATA_ACKED;
 } else {
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 86624fabc4bf..f37d5928921a 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -111,6 +111,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 p->snd_una = tp->snd_una;
 p->snd_cwnd = tp->snd_cwnd;
 p->snd_wnd = tp->snd_wnd;
+p->ssthresh = tcp_current_ssthresh(sk);
 p->srtt = tp->srtt >> 3;
 
 tcp_probe.head = (tcp_probe.head + 1) % bufsize;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 24424c3b7dc0..06012920912a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2475,6 +2475,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 write_unlock_bh(&idev->lock);
 
 __ipv6_ifa_notify(RTM_DELADDR, ifa);
+atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
 in6_ifa_put(ifa);
 
 write_lock_bh(&idev->lock);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4765a29f98a8..6a6714d154ed 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -604,7 +604,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
 
 read_lock(&raw_v6_lock);
 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
-while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
+while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr,
 IP6CB(skb)->iif))) {
 rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
 sk = sk_next(sk);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 254c769b750a..aeda617246b7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1497,7 +1497,7 @@ static int __init ip6_tables_init(void)
 if (ret < 0)
 goto err5;
 
-printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
+printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
 return 0;
 
 err5:
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 89e20ab494b8..36df2218b669 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -86,7 +86,7 @@ static int ipv6_print_conntrack(struct seq_file *s,
 * - Note also special handling of AUTH header. Thanks to IPsec wizards.
 */
 
-int nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp,
+int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
 int len)
 {
 u8 nexthdr = *nexthdrp;
@@ -117,22 +117,25 @@ int nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp,
 return start;
 }
 
-static int
-ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
-u_int8_t *protonum)
+static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+unsigned int *dataoff, u_int8_t *protonum)
 {
-unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data;
-unsigned char pnum = ipv6_hdr(*pskb)->nexthdr;
-int protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
-(*pskb)->len - extoff);
+unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
+unsigned char pnum;
+int protoff;
+
+if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
+&pnum, sizeof(pnum)) != 0) {
+pr_debug("ip6_conntrack_core: can't get nexthdr\n");
+return -NF_ACCEPT;
+}
+protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, skb->len - extoff);
 /*
- * (protoff == (*pskb)->len) mean that the packet doesn't have no data
+ * (protoff == skb->len) mean that the packet doesn't have no data
  * except of IPv6 & ext headers. but it's tracked anyway. - YK
  */
-if ((protoff < 0) || (protoff > (*pskb)->len)) {
+if ((protoff < 0) || (protoff > skb->len)) {
 pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
-NF_CT_STAT_INC_ATOMIC(error);
-NF_CT_STAT_INC_ATOMIC(invalid);
 return -NF_ACCEPT;
 }
 
@@ -370,14 +373,14 @@ static int ipv6_nfattr_to_tuple(struct nfattr *tb[],
 }
 #endif
 
-struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
 .l3proto = PF_INET6,
 .name = "ipv6",
 .pkt_to_tuple = ipv6_pkt_to_tuple,
 .invert_tuple = ipv6_invert_tuple,
 .print_tuple = ipv6_print_tuple,
 .print_conntrack = ipv6_print_conntrack,
-.prepare = ipv6_prepare,
+.get_l4proto = ipv6_get_l4proto,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
 .tuple_to_nfattr = ipv6_tuple_to_nfattr,
 .nfattr_to_tuple = ipv6_nfattr_to_tuple,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9defc7e14554..ab154fb90018 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -136,49 +136,23 @@ icmpv6_error_message(struct sk_buff *skb,
 {
 struct nf_conntrack_tuple intuple, origtuple;
 struct nf_conntrack_tuple_hash *h;
-struct icmp6hdr _hdr, *hp;
-unsigned int inip6off;
 struct nf_conntrack_l4proto *inproto;
-u_int8_t inprotonum;
-unsigned int inprotoff;
 
 NF_CT_ASSERT(skb->nfct == NULL);
 
-hp = skb_header_pointer(skb, icmp6off, sizeof(_hdr), &_hdr);
-if (hp == NULL) {
-pr_debug("icmpv6_error: Can't get ICMPv6 hdr.\n");
-return -NF_ACCEPT;
-}
-
-inip6off = icmp6off + sizeof(_hdr);
-if (skb_copy_bits(skb, inip6off+offsetof(struct ipv6hdr, nexthdr),
-&inprotonum, sizeof(inprotonum)) != 0) {
-pr_debug("icmpv6_error: Can't get nexthdr in inner IPv6 "
-"header.\n");
-return -NF_ACCEPT;
-}
-inprotoff = nf_ct_ipv6_skip_exthdr(skb,
-inip6off + sizeof(struct ipv6hdr),
-&inprotonum,
-skb->len - inip6off
-- sizeof(struct ipv6hdr));
-
-if ((inprotoff > skb->len) || (inprotonum == NEXTHDR_FRAGMENT)) {
-pr_debug("icmpv6_error: Can't get protocol header in ICMPv6 "
-"payload.\n");
-return -NF_ACCEPT;
-}
-
-/* rcu_read_lock()ed by nf_hook_slow */
-inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum);
-
 /* Are they talking about one of our connections? */
-if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum,
-&origtuple, &nf_conntrack_l3proto_ipv6, inproto)) {
+if (!nf_ct_get_tuplepr(skb,
+skb_network_offset(skb)
++ sizeof(struct ipv6hdr)
++ sizeof(struct icmp6hdr),
+PF_INET6, &origtuple)) {
 pr_debug("icmpv6_error: Can't get tuple\n");
 return -NF_ACCEPT;
 }
 
+/* rcu_read_lock()ed by nf_hook_slow */
+inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum);
+
 /* Ordinarily, we'd expect the inverted tupleproto, but it's
 been preserved inside the ICMP. */
 if (!nf_ct_invert_tuple(&intuple, &origtuple,
@@ -302,7 +276,7 @@ static struct ctl_table icmpv6_sysctl_table[] = {
 };
 #endif /* CONFIG_SYSCTL */
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
+struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
 {
 .l3proto = PF_INET6,
 .l4proto = IPPROTO_ICMPV6,
@@ -323,5 +297,3 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
 .ctl_table = icmpv6_sysctl_table,
 #endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_l4proto_icmpv6);
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index f8fcc3d10327..16ce9cd4f39e 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,13 +1,13 @@
 config IUCV
-tristate "IUCV support (VM only)"
+tristate "IUCV support (S390 - z/VM only)"
 depends on S390
 help
-Select this option if you want to use inter-user communication under
-VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
+Select this option if you want to use inter-user communication
+under VM or VIF. If you run on z/VM, say "Y" to enable a fast
 communication link between VM guests.
 
 config AFIUCV
-tristate "AF_IUCV support (VM only)"
+tristate "AF_IUCV support (S390 - z/VM only)"
 depends on IUCV
 help
 Select this option if you want to use inter-user communication under
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d9e9ddb8eac5..53ae14c35f70 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 
 sock_init_data(sock, sk);
 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+unsigned long flags;
+struct iucv_sock *par = iucv_sk(parent);
+
 sock_hold(sk);
-list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+spin_lock_irqsave(&par->accept_q_lock, flags);
+list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+spin_unlock_irqrestore(&par->accept_q_lock, flags);
 iucv_sk(sk)->parent = parent;
 parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+unsigned long flags;
+struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+spin_lock_irqsave(&par->accept_q_lock, flags);
 list_del_init(&iucv_sk(sk)->accept_q);
+spin_unlock_irqrestore(&par->accept_q_lock, flags);
 iucv_sk(sk)->parent->sk_ack_backlog--;
 iucv_sk(sk)->parent = NULL;
 sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 lock_sock(sk);
 
 if (sk->sk_state == IUCV_CLOSED) {
-release_sock(sk);
 iucv_accept_unlink(sk);
+release_sock(sk);
 continue;
 }
 
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 /* Find out if this path belongs to af_iucv. */
 read_lock(&iucv_sk_list.lock);
 iucv = NULL;
+sk = NULL;
 sk_for_each(sk, node, &iucv_sk_list.head)
 if (sk->sk_state == IUCV_LISTEN &&
 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index b7333061016d..ad5150b8dfa9 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1494,7 +1494,10 @@ static void iucv_tasklet_fn(unsigned long ignored)
 struct iucv_irq_list *p, *n;
 
 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
-spin_lock(&iucv_table_lock);
+if (!spin_trylock(&iucv_table_lock)) {
+tasklet_schedule(&iucv_tasklet);
+return;
+}
 iucv_active_cpu = smp_processor_id();
 
 spin_lock_irq(&iucv_queue_lock);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9e3964638bad..a3e01d76d503 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -118,7 +118,7 @@ static ssize_t ieee80211_if_fmt_flags(
 sdata->u.sta.authenticated ? "AUTH\n" : "",
 sdata->u.sta.associated ? "ASSOC\n" : "",
 sdata->u.sta.probereq_poll ? "PROBEREQ POLL\n" : "",
-sdata->u.sta.use_protection ? "CTS prot\n" : "");
+sdata->use_protection ? "CTS prot\n" : "");
 }
 __IEEE80211_IF_FILE(flags);
 
diff --git a/net/mac80211/hostapd_ioctl.h b/net/mac80211/hostapd_ioctl.h
index 34fa128e9872..52da513f060a 100644
--- a/net/mac80211/hostapd_ioctl.h
+++ b/net/mac80211/hostapd_ioctl.h
@@ -26,24 +26,16 @@
 * mess shall be deleted completely. */
 enum {
 PRISM2_PARAM_IEEE_802_1X = 23,
-PRISM2_PARAM_ANTSEL_TX = 24,
-PRISM2_PARAM_ANTSEL_RX = 25,
 
 /* Instant802 additions */
 PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES = 1001,
-PRISM2_PARAM_DROP_UNENCRYPTED = 1002,
 PRISM2_PARAM_PREAMBLE = 1003,
 PRISM2_PARAM_SHORT_SLOT_TIME = 1006,
 PRISM2_PARAM_NEXT_MODE = 1008,
-PRISM2_PARAM_CLEAR_KEYS = 1009,
 PRISM2_PARAM_RADIO_ENABLED = 1010,
 PRISM2_PARAM_ANTENNA_MODE = 1013,
 PRISM2_PARAM_STAT_TIME = 1016,
 PRISM2_PARAM_STA_ANTENNA_SEL = 1017,
-PRISM2_PARAM_FORCE_UNICAST_RATE = 1018,
-PRISM2_PARAM_RATE_CTRL_NUM_UP = 1019,
-PRISM2_PARAM_RATE_CTRL_NUM_DOWN = 1020,
-PRISM2_PARAM_MAX_RATECTRL_RATE = 1021,
 PRISM2_PARAM_TX_POWER_REDUCTION = 1022,
 PRISM2_PARAM_KEY_TX_RX_THRESHOLD = 1024,
 PRISM2_PARAM_DEFAULT_WEP_ONLY = 1026,
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 4e84f24fd439..2ddf4ef4065e 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/bitmap.h> 25#include <linux/bitmap.h>
26#include <net/cfg80211.h> 26#include <net/cfg80211.h>
27#include <asm/unaligned.h>
27 28
28#include "ieee80211_common.h" 29#include "ieee80211_common.h"
29#include "ieee80211_i.h" 30#include "ieee80211_i.h"
@@ -56,6 +57,17 @@ static const unsigned char eapol_header[] =
56 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e }; 57 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e };
57 58
58 59
60/*
61 * For seeing transmitted packets on monitor interfaces
62 * we have a radiotap header too.
63 */
64struct ieee80211_tx_status_rtap_hdr {
65 struct ieee80211_radiotap_header hdr;
66 __le16 tx_flags;
67 u8 data_retries;
68} __attribute__ ((packed));
69
70
59static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata, 71static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata,
60 struct ieee80211_hdr *hdr) 72 struct ieee80211_hdr *hdr)
61{ 73{
@@ -430,7 +442,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx)
430 if (!tx->u.tx.rate) 442 if (!tx->u.tx.rate)
431 return TXRX_DROP; 443 return TXRX_DROP;
432 if (tx->u.tx.mode->mode == MODE_IEEE80211G && 444 if (tx->u.tx.mode->mode == MODE_IEEE80211G &&
433 tx->local->cts_protect_erp_frames && tx->fragmented && 445 tx->sdata->use_protection && tx->fragmented &&
434 extra.nonerp) { 446 extra.nonerp) {
435 tx->u.tx.last_frag_rate = tx->u.tx.rate; 447 tx->u.tx.last_frag_rate = tx->u.tx.rate;
436 tx->u.tx.probe_last_frag = extra.probe ? 1 : 0; 448 tx->u.tx.probe_last_frag = extra.probe ? 1 : 0;
@@ -528,7 +540,7 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
528 /* reserve enough extra head and tail room for possible 540 /* reserve enough extra head and tail room for possible
529 * encryption */ 541 * encryption */
530 frag = frags[i] = 542 frag = frags[i] =
531 dev_alloc_skb(tx->local->hw.extra_tx_headroom + 543 dev_alloc_skb(tx->local->tx_headroom +
532 frag_threshold + 544 frag_threshold +
533 IEEE80211_ENCRYPT_HEADROOM + 545 IEEE80211_ENCRYPT_HEADROOM +
534 IEEE80211_ENCRYPT_TAILROOM); 546 IEEE80211_ENCRYPT_TAILROOM);
@@ -537,8 +549,8 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
537 /* Make sure that all fragments use the same priority so 549 /* Make sure that all fragments use the same priority so
538 * that they end up using the same TX queue */ 550 * that they end up using the same TX queue */
539 frag->priority = first->priority; 551 frag->priority = first->priority;
540 skb_reserve(frag, tx->local->hw.extra_tx_headroom + 552 skb_reserve(frag, tx->local->tx_headroom +
541 IEEE80211_ENCRYPT_HEADROOM); 553 IEEE80211_ENCRYPT_HEADROOM);
542 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); 554 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
543 memcpy(fhdr, first->data, hdrlen); 555 memcpy(fhdr, first->data, hdrlen);
544 if (i == num_fragm - 2) 556 if (i == num_fragm - 2)
@@ -856,8 +868,7 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
856 * for the frame. */ 868 * for the frame. */
857 if (mode->mode == MODE_IEEE80211G && 869 if (mode->mode == MODE_IEEE80211G &&
858 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && 870 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) &&
859 tx->u.tx.unicast && 871 tx->u.tx.unicast && tx->sdata->use_protection &&
860 tx->local->cts_protect_erp_frames &&
861 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) 872 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
862 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; 873 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
863 874
@@ -1118,7 +1129,138 @@ ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx)
1118} 1129}
1119 1130
1120 1131
1121static void inline 1132/*
1133 * deal with packet injection down monitor interface
1134 * with Radiotap Header -- only called for monitor mode interface
1135 */
1136
1137static ieee80211_txrx_result
1138__ieee80211_parse_tx_radiotap(
1139 struct ieee80211_txrx_data *tx,
1140 struct sk_buff *skb, struct ieee80211_tx_control *control)
1141{
1142 /*
1143 * this is the moment to interpret and discard the radiotap header that
1144 * must be at the start of the packet injected in Monitor mode
1145 *
1146 * Need to take some care with endian-ness since radiotap
1147 * args are little-endian
1148 */
1149
1150 struct ieee80211_radiotap_iterator iterator;
1151 struct ieee80211_radiotap_header *rthdr =
1152 (struct ieee80211_radiotap_header *) skb->data;
1153 struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode;
1154 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
1155
1156 /*
1157 * default control situation for all injected packets
1158 * FIXME: this does not suit all usage cases, expand to allow control
1159 */
1160
1161 control->retry_limit = 1; /* no retry */
1162 control->key_idx = -1; /* no encryption key */
1163 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1164 IEEE80211_TXCTL_USE_CTS_PROTECT);
1165 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT |
1166 IEEE80211_TXCTL_NO_ACK;
1167 control->antenna_sel_tx = 0; /* default to default antenna */
1168
1169 /*
1170 * for every radiotap entry that is present
1171 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
1172 * entries present, or -EINVAL on error)
1173 */
1174
1175 while (!ret) {
1176 int i, target_rate;
1177
1178 ret = ieee80211_radiotap_iterator_next(&iterator);
1179
1180 if (ret)
1181 continue;
1182
1183 /* see if this argument is something we can use */
1184 switch (iterator.this_arg_index) {
1185 /*
1186 * You must take care when dereferencing iterator.this_arg
1187 * for multibyte types... the pointer is not aligned. Use
1188 * get_unaligned((type *)iterator.this_arg) to dereference
1189 * iterator.this_arg for type "type" safely on all arches.
1190 */
1191 case IEEE80211_RADIOTAP_RATE:
1192 /*
1193 * radiotap rate u8 is in 500kbps units eg, 0x02=1Mbps
1194 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps
1195 */
1196 target_rate = (*iterator.this_arg) * 5;
1197 for (i = 0; i < mode->num_rates; i++) {
1198 struct ieee80211_rate *r = &mode->rates[i];
1199
1200 if (r->rate > target_rate)
1201 continue;
1202
1203 control->rate = r;
1204
1205 if (r->flags & IEEE80211_RATE_PREAMBLE2)
1206 control->tx_rate = r->val2;
1207 else
1208 control->tx_rate = r->val;
1209
1210 /* end on exact match */
1211 if (r->rate == target_rate)
1212 i = mode->num_rates;
1213 }
1214 break;
1215
1216 case IEEE80211_RADIOTAP_ANTENNA:
1217 /*
1218 * radiotap uses 0 for 1st ant, mac80211 is 1 for
1219 * 1st ant
1220 */
1221 control->antenna_sel_tx = (*iterator.this_arg) + 1;
1222 break;
1223
1224 case IEEE80211_RADIOTAP_DBM_TX_POWER:
1225 control->power_level = *iterator.this_arg;
1226 break;
1227
1228 case IEEE80211_RADIOTAP_FLAGS:
1229 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
1230 /*
1231 * this indicates that the skb we have been
1232 * handed has the 32-bit FCS CRC at the end...
1233 * we should react to that by snipping it off
1234 * because it will be recomputed and added
1235 * on transmission
1236 */
1237 if (skb->len < (iterator.max_length + FCS_LEN))
1238 return TXRX_DROP;
1239
1240 skb_trim(skb, skb->len - FCS_LEN);
1241 }
1242 break;
1243
1244 default:
1245 break;
1246 }
1247 }
1248
1249 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
1250 return TXRX_DROP;
1251
1252 /*
1253 * remove the radiotap header
1254 * iterator->max_length was sanity-checked against
1255 * skb->len by iterator init
1256 */
1257 skb_pull(skb, iterator.max_length);
1258
1259 return TXRX_CONTINUE;
1260}
1261
1262
1263static ieee80211_txrx_result inline
1122__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, 1264__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1123 struct sk_buff *skb, 1265 struct sk_buff *skb,
1124 struct net_device *dev, 1266 struct net_device *dev,
@@ -1126,6 +1268,9 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1126{ 1268{
1127 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1269 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1128 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1270 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1271 struct ieee80211_sub_if_data *sdata;
1272 ieee80211_txrx_result res = TXRX_CONTINUE;
1273
1129 int hdrlen; 1274 int hdrlen;
1130 1275
1131 memset(tx, 0, sizeof(*tx)); 1276 memset(tx, 0, sizeof(*tx));
@@ -1135,7 +1280,32 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1135 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1280 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1136 tx->sta = sta_info_get(local, hdr->addr1); 1281 tx->sta = sta_info_get(local, hdr->addr1);
1137 tx->fc = le16_to_cpu(hdr->frame_control); 1282 tx->fc = le16_to_cpu(hdr->frame_control);
1283
1284 /*
1285 * set defaults for things that can be set by
1286 * injected radiotap headers
1287 */
1138 control->power_level = local->hw.conf.power_level; 1288 control->power_level = local->hw.conf.power_level;
1289 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1290 if (local->sta_antenna_sel != STA_ANTENNA_SEL_AUTO && tx->sta)
1291 control->antenna_sel_tx = tx->sta->antenna_sel_tx;
1292
1293 /* process and remove the injection radiotap header */
1294 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1295 if (unlikely(sdata->type == IEEE80211_IF_TYPE_MNTR)) {
1296 if (__ieee80211_parse_tx_radiotap(tx, skb, control) ==
1297 TXRX_DROP) {
1298 return TXRX_DROP;
1299 }
1300 /*
1301 * the radiotap header has been removed at this point and
1302 * control has been filled with what we could use, so point
1303 * hdr at the actual ieee80211 header now
1304 */
1305 hdr = (struct ieee80211_hdr *) skb->data;
1306 res = TXRX_QUEUED; /* indication it was monitor packet */
1307 }
1308
1139 tx->u.tx.control = control; 1309 tx->u.tx.control = control;
1140 tx->u.tx.unicast = !is_multicast_ether_addr(hdr->addr1); 1310 tx->u.tx.unicast = !is_multicast_ether_addr(hdr->addr1);
1141 if (is_multicast_ether_addr(hdr->addr1)) 1311 if (is_multicast_ether_addr(hdr->addr1))
@@ -1152,9 +1322,6 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1152 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1322 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK;
1153 tx->sta->clear_dst_mask = 0; 1323 tx->sta->clear_dst_mask = 0;
1154 } 1324 }
1155 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1156 if (local->sta_antenna_sel != STA_ANTENNA_SEL_AUTO && tx->sta)
1157 control->antenna_sel_tx = tx->sta->antenna_sel_tx;
1158 hdrlen = ieee80211_get_hdrlen(tx->fc); 1325 hdrlen = ieee80211_get_hdrlen(tx->fc);
1159 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 1326 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1160 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 1327 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
@@ -1162,6 +1329,7 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
1162 } 1329 }
1163 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1330 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT;
1164 1331
1332 return res;
1165} 1333}
1166 1334
1167static int inline is_ieee80211_device(struct net_device *dev, 1335static int inline is_ieee80211_device(struct net_device *dev,
@@ -1274,7 +1442,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1274 struct sta_info *sta; 1442 struct sta_info *sta;
1275 ieee80211_tx_handler *handler; 1443 ieee80211_tx_handler *handler;
1276 struct ieee80211_txrx_data tx; 1444 struct ieee80211_txrx_data tx;
1277 ieee80211_txrx_result res = TXRX_DROP; 1445 ieee80211_txrx_result res = TXRX_DROP, res_prepare;
1278 int ret, i; 1446 int ret, i;
1279 1447
1280 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1448 WARN_ON(__ieee80211_queue_pending(local, control->queue));
@@ -1284,15 +1452,26 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1284 return 0; 1452 return 0;
1285 } 1453 }
1286 1454
1287 __ieee80211_tx_prepare(&tx, skb, dev, control); 1455 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control);
1456
1457 if (res_prepare == TXRX_DROP) {
1458 dev_kfree_skb(skb);
1459 return 0;
1460 }
1461
1288 sta = tx.sta; 1462 sta = tx.sta;
1289 tx.u.tx.mgmt_interface = mgmt; 1463 tx.u.tx.mgmt_interface = mgmt;
1290 tx.u.tx.mode = local->hw.conf.mode; 1464 tx.u.tx.mode = local->hw.conf.mode;
1291 1465
1292 for (handler = local->tx_handlers; *handler != NULL; handler++) { 1466 if (res_prepare == TXRX_QUEUED) { /* if it was an injected packet */
1293 res = (*handler)(&tx); 1467 res = TXRX_CONTINUE;
1294 if (res != TXRX_CONTINUE) 1468 } else {
1295 break; 1469 for (handler = local->tx_handlers; *handler != NULL;
1470 handler++) {
1471 res = (*handler)(&tx);
1472 if (res != TXRX_CONTINUE)
1473 break;
1474 }
1296 } 1475 }
1297 1476
1298 skb = tx.skb; /* handlers are allowed to change skb */ 1477 skb = tx.skb; /* handlers are allowed to change skb */
@@ -1467,8 +1646,7 @@ static int ieee80211_master_start_xmit(struct sk_buff *skb,
1467 } 1646 }
1468 osdata = IEEE80211_DEV_TO_SUB_IF(odev); 1647 osdata = IEEE80211_DEV_TO_SUB_IF(odev);
1469 1648
1470 headroom = osdata->local->hw.extra_tx_headroom + 1649 headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM;
1471 IEEE80211_ENCRYPT_HEADROOM;
1472 if (skb_headroom(skb) < headroom) { 1650 if (skb_headroom(skb) < headroom) {
1473 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { 1651 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1474 dev_kfree_skb(skb); 1652 dev_kfree_skb(skb);
@@ -1494,6 +1672,56 @@ static int ieee80211_master_start_xmit(struct sk_buff *skb,
1494} 1672}
1495 1673
1496 1674
1675int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1676 struct net_device *dev)
1677{
1678 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1679 struct ieee80211_tx_packet_data *pkt_data;
1680 struct ieee80211_radiotap_header *prthdr =
1681 (struct ieee80211_radiotap_header *)skb->data;
1682 u16 len;
1683
1684 /*
1685 * there must be a radiotap header at the
1686 * start in this case
1687 */
1688 if (unlikely(prthdr->it_version)) {
1689 /* only version 0 is supported */
1690 dev_kfree_skb(skb);
1691 return NETDEV_TX_OK;
1692 }
1693
1694 skb->dev = local->mdev;
1695
1696 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1697 memset(pkt_data, 0, sizeof(*pkt_data));
1698 pkt_data->ifindex = dev->ifindex;
1699 pkt_data->mgmt_iface = 0;
1700 pkt_data->do_not_encrypt = 1;
1701
1702 /* above needed because we set skb device to master */
1703
1704 /*
1705 * fix up the pointers accounting for the radiotap
1706 * header still being in there. We are being given
1707 * a precooked IEEE80211 header so no need for
1708 * normal processing
1709 */
1710 len = le16_to_cpu(get_unaligned(&prthdr->it_len));
1711 skb_set_mac_header(skb, len);
1712 skb_set_network_header(skb, len + sizeof(struct ieee80211_hdr));
1713 skb_set_transport_header(skb, len + sizeof(struct ieee80211_hdr));
1714
1715 /*
1716 * pass the radiotap header up to
1717 * the next stage intact
1718 */
1719 dev_queue_xmit(skb);
1720
1721 return NETDEV_TX_OK;
1722}
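
ieee80211_monitor_start_xmit() above trusts the injecting application to prepend a version-0 radiotap header and uses its little-endian it_len field (read via get_unaligned() because the buffer may not be aligned) to find where the precooked 802.11 header starts. A small userspace sketch of that bookkeeping, using a fabricated packet:

#include <stdint.h>
#include <stdio.h>

/* read a little-endian u16 without assuming alignment */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* fabricated minimal version-0 radiotap header: it_len = 8, no fields present */
        uint8_t pkt[32] = { 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00 };
        uint8_t it_version = pkt[0];
        uint16_t it_len = get_le16(&pkt[2]);

        if (it_version != 0) {
                fprintf(stderr, "only radiotap version 0 is supported\n");
                return 1;
        }
        /* the 802.11 header starts right after the radiotap blob */
        printf("802.11 header begins at offset %u\n", it_len);
        return 0;
}
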
1723
1724
1497/** 1725/**
1498 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type 1726 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
1499 * subinterfaces (wlan#, WDS, and VLAN interfaces) 1727 * subinterfaces (wlan#, WDS, and VLAN interfaces)
@@ -1509,8 +1737,8 @@ static int ieee80211_master_start_xmit(struct sk_buff *skb,
1509 * encapsulated packet will then be passed to master interface, wlan#.11, for 1737 * encapsulated packet will then be passed to master interface, wlan#.11, for
1510 * transmission (through low-level driver). 1738 * transmission (through low-level driver).
1511 */ 1739 */
1512static int ieee80211_subif_start_xmit(struct sk_buff *skb, 1740int ieee80211_subif_start_xmit(struct sk_buff *skb,
1513 struct net_device *dev) 1741 struct net_device *dev)
1514{ 1742{
1515 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1743 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1516 struct ieee80211_tx_packet_data *pkt_data; 1744 struct ieee80211_tx_packet_data *pkt_data;
@@ -1619,7 +1847,7 @@ static int ieee80211_subif_start_xmit(struct sk_buff *skb,
1619 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and 1847 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1620 * alloc_skb() (net/core/skbuff.c) 1848 * alloc_skb() (net/core/skbuff.c)
1621 */ 1849 */
1622 head_need = hdrlen + encaps_len + local->hw.extra_tx_headroom; 1850 head_need = hdrlen + encaps_len + local->tx_headroom;
1623 head_need -= skb_headroom(skb); 1851 head_need -= skb_headroom(skb);
1624 1852
1625 /* We are going to modify skb data, so make a copy of it if happens to 1853 /* We are going to modify skb data, so make a copy of it if happens to
@@ -1658,7 +1886,7 @@ static int ieee80211_subif_start_xmit(struct sk_buff *skb,
1658 1886
1659 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1887 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1660 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 1888 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data));
1661 pkt_data->ifindex = sdata->dev->ifindex; 1889 pkt_data->ifindex = dev->ifindex;
1662 pkt_data->mgmt_iface = (sdata->type == IEEE80211_IF_TYPE_MGMT); 1890 pkt_data->mgmt_iface = (sdata->type == IEEE80211_IF_TYPE_MGMT);
1663 pkt_data->do_not_encrypt = no_encrypt; 1891 pkt_data->do_not_encrypt = no_encrypt;
1664 1892
@@ -1706,9 +1934,9 @@ ieee80211_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
1706 return 0; 1934 return 0;
1707 } 1935 }
1708 1936
1709 if (skb_headroom(skb) < sdata->local->hw.extra_tx_headroom) { 1937 if (skb_headroom(skb) < sdata->local->tx_headroom) {
1710 if (pskb_expand_head(skb, 1938 if (pskb_expand_head(skb, sdata->local->tx_headroom,
1711 sdata->local->hw.extra_tx_headroom, 0, GFP_ATOMIC)) { 1939 0, GFP_ATOMIC)) {
1712 dev_kfree_skb(skb); 1940 dev_kfree_skb(skb);
1713 return 0; 1941 return 0;
1714 } 1942 }
@@ -1847,12 +2075,12 @@ struct sk_buff * ieee80211_beacon_get(struct ieee80211_hw *hw, int if_id,
1847 bh_len = ap->beacon_head_len; 2075 bh_len = ap->beacon_head_len;
1848 bt_len = ap->beacon_tail_len; 2076 bt_len = ap->beacon_tail_len;
1849 2077
1850 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 2078 skb = dev_alloc_skb(local->tx_headroom +
1851 bh_len + bt_len + 256 /* maximum TIM len */); 2079 bh_len + bt_len + 256 /* maximum TIM len */);
1852 if (!skb) 2080 if (!skb)
1853 return NULL; 2081 return NULL;
1854 2082
1855 skb_reserve(skb, local->hw.extra_tx_headroom); 2083 skb_reserve(skb, local->tx_headroom);
1856 memcpy(skb_put(skb, bh_len), b_head, bh_len); 2084 memcpy(skb_put(skb, bh_len), b_head, bh_len);
1857 2085
1858 ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data); 2086 ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data);
@@ -2376,8 +2604,7 @@ static void ieee80211_start_hard_monitor(struct ieee80211_local *local)
2376 struct ieee80211_if_init_conf conf; 2604 struct ieee80211_if_init_conf conf;
2377 2605
2378 if (local->open_count && local->open_count == local->monitors && 2606 if (local->open_count && local->open_count == local->monitors &&
2379 !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER) && 2607 !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER)) {
2380 local->ops->add_interface) {
2381 conf.if_id = -1; 2608 conf.if_id = -1;
2382 conf.type = IEEE80211_IF_TYPE_MNTR; 2609 conf.type = IEEE80211_IF_TYPE_MNTR;
2383 conf.mac_addr = NULL; 2610 conf.mac_addr = NULL;
@@ -2420,21 +2647,14 @@ static int ieee80211_open(struct net_device *dev)
2420 } 2647 }
2421 ieee80211_start_soft_monitor(local); 2648 ieee80211_start_soft_monitor(local);
2422 2649
2423 if (local->ops->add_interface) { 2650 conf.if_id = dev->ifindex;
2424 conf.if_id = dev->ifindex; 2651 conf.type = sdata->type;
2425 conf.type = sdata->type; 2652 conf.mac_addr = dev->dev_addr;
2426 conf.mac_addr = dev->dev_addr; 2653 res = local->ops->add_interface(local_to_hw(local), &conf);
2427 res = local->ops->add_interface(local_to_hw(local), &conf); 2654 if (res) {
2428 if (res) { 2655 if (sdata->type == IEEE80211_IF_TYPE_MNTR)
2429 if (sdata->type == IEEE80211_IF_TYPE_MNTR) 2656 ieee80211_start_hard_monitor(local);
2430 ieee80211_start_hard_monitor(local); 2657 return res;
2431 return res;
2432 }
2433 } else {
2434 if (sdata->type != IEEE80211_IF_TYPE_STA)
2435 return -EOPNOTSUPP;
2436 if (local->open_count > 0)
2437 return -ENOBUFS;
2438 } 2658 }
2439 2659
2440 if (local->open_count == 0) { 2660 if (local->open_count == 0) {
@@ -2941,34 +3161,6 @@ int ieee80211_radar_status(struct ieee80211_hw *hw, int channel,
2941} 3161}
2942EXPORT_SYMBOL(ieee80211_radar_status); 3162EXPORT_SYMBOL(ieee80211_radar_status);
2943 3163
2944int ieee80211_set_aid_for_sta(struct ieee80211_hw *hw, u8 *peer_address,
2945 u16 aid)
2946{
2947 struct sk_buff *skb;
2948 struct ieee80211_msg_set_aid_for_sta *msg;
2949 struct ieee80211_local *local = hw_to_local(hw);
2950
2951 /* unlikely because if this event only happens for APs,
2952 * which require an open ap device. */
2953 if (unlikely(!local->apdev))
2954 return 0;
2955
2956 skb = dev_alloc_skb(sizeof(struct ieee80211_frame_info) +
2957 sizeof(struct ieee80211_msg_set_aid_for_sta));
2958
2959 if (!skb)
2960 return -ENOMEM;
2961 skb_reserve(skb, sizeof(struct ieee80211_frame_info));
2962
2963 msg = (struct ieee80211_msg_set_aid_for_sta *)
2964 skb_put(skb, sizeof(struct ieee80211_msg_set_aid_for_sta));
2965 memcpy(msg->sta_address, peer_address, ETH_ALEN);
2966 msg->aid = aid;
2967
2968 ieee80211_rx_mgmt(local, skb, NULL, ieee80211_msg_set_aid_for_sta);
2969 return 0;
2970}
2971EXPORT_SYMBOL(ieee80211_set_aid_for_sta);
2972 3164
2973static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) 3165static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
2974{ 3166{
@@ -4284,6 +4476,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
4284 struct ieee80211_local *local = hw_to_local(hw); 4476 struct ieee80211_local *local = hw_to_local(hw);
4285 u16 frag, type; 4477 u16 frag, type;
4286 u32 msg_type; 4478 u32 msg_type;
4479 struct ieee80211_tx_status_rtap_hdr *rthdr;
4480 struct ieee80211_sub_if_data *sdata;
4481 int monitors;
4287 4482
4288 if (!status) { 4483 if (!status) {
4289 printk(KERN_ERR 4484 printk(KERN_ERR
@@ -4395,27 +4590,100 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
4395 local->dot11FailedCount++; 4590 local->dot11FailedCount++;
4396 } 4591 }
4397 4592
4398 if (!(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS) 4593 msg_type = (status->flags & IEEE80211_TX_STATUS_ACK) ?
4399 || unlikely(!local->apdev)) { 4594 ieee80211_msg_tx_callback_ack : ieee80211_msg_tx_callback_fail;
4595
4596 /* this was a transmitted frame, but now we want to reuse it */
4597 skb_orphan(skb);
4598
4599 if ((status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS) &&
4600 local->apdev) {
4601 if (local->monitors) {
4602 skb2 = skb_clone(skb, GFP_ATOMIC);
4603 } else {
4604 skb2 = skb;
4605 skb = NULL;
4606 }
4607
4608 if (skb2)
4609 /* Send frame to hostapd */
4610 ieee80211_rx_mgmt(local, skb2, NULL, msg_type);
4611
4612 if (!skb)
4613 return;
4614 }
4615
4616 if (!local->monitors) {
4400 dev_kfree_skb(skb); 4617 dev_kfree_skb(skb);
4401 return; 4618 return;
4402 } 4619 }
4403 4620
4404 msg_type = (status->flags & IEEE80211_TX_STATUS_ACK) ? 4621 /* send frame to monitor interfaces now */
4405 ieee80211_msg_tx_callback_ack : ieee80211_msg_tx_callback_fail;
4406 4622
4407 /* skb was the original skb used for TX. Clone it and give the clone 4623 if (skb_headroom(skb) < sizeof(*rthdr)) {
4408 * to netif_rx(). Free original skb. */ 4624 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
4409 skb2 = skb_copy(skb, GFP_ATOMIC);
4410 if (!skb2) {
4411 dev_kfree_skb(skb); 4625 dev_kfree_skb(skb);
4412 return; 4626 return;
4413 } 4627 }
4414 dev_kfree_skb(skb);
4415 skb = skb2;
4416 4628
4417 /* Send frame to hostapd */ 4629 rthdr = (struct ieee80211_tx_status_rtap_hdr*)
4418 ieee80211_rx_mgmt(local, skb, NULL, msg_type); 4630 skb_push(skb, sizeof(*rthdr));
4631
4632 memset(rthdr, 0, sizeof(*rthdr));
4633 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
4634 rthdr->hdr.it_present =
4635 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
4636 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
4637
4638 if (!(status->flags & IEEE80211_TX_STATUS_ACK) &&
4639 !is_multicast_ether_addr(hdr->addr1))
4640 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
4641
4642 if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) &&
4643 (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT))
4644 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
4645 else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS)
4646 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
4647
4648 rthdr->data_retries = status->retry_count;
4649
4650 read_lock(&local->sub_if_lock);
4651 monitors = local->monitors;
4652 list_for_each_entry(sdata, &local->sub_if_list, list) {
4653 /*
4654 * Using the monitors counter is possibly racy, but
4655 * if the value is wrong we simply either clone the skb
4656 * once too often or forget to send it to one monitor iface.
4657 * The latter case isn't nice but fixing the race is much
4658 * more complicated.
4659 */
4660 if (!monitors || !skb)
4661 goto out;
4662
4663 if (sdata->type == IEEE80211_IF_TYPE_MNTR) {
4664 if (!netif_running(sdata->dev))
4665 continue;
4666 monitors--;
4667 if (monitors)
4668 skb2 = skb_clone(skb, GFP_KERNEL);
4669 else
4670 skb2 = NULL;
4671 skb->dev = sdata->dev;
4672 /* XXX: is this sufficient for BPF? */
4673 skb_set_mac_header(skb, 0);
4674 skb->ip_summed = CHECKSUM_UNNECESSARY;
4675 skb->pkt_type = PACKET_OTHERHOST;
4676 skb->protocol = htons(ETH_P_802_2);
4677 memset(skb->cb, 0, sizeof(skb->cb));
4678 netif_rx(skb);
4679 skb = skb2;
4680 break;
4681 }
4682 }
4683 out:
4684 read_unlock(&local->sub_if_lock);
4685 if (skb)
4686 dev_kfree_skb(skb);
4419} 4687}
4420EXPORT_SYMBOL(ieee80211_tx_status); 4688EXPORT_SYMBOL(ieee80211_tx_status);
4421 4689
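
The rewritten tail of ieee80211_tx_status() pushes a small radiotap header in front of the frame before cloning it to monitor interfaces, advertising only the TX-flags and data-retries fields. A self-contained sketch of that layout; the bit numbers and the flag value are written out here from the radiotap field definitions rather than taken from kernel headers, so treat them as illustrative:

#include <stdint.h>
#include <stdio.h>

#define RADIOTAP_TX_FLAGS      15            /* it_present bit for the TX-flags field */
#define RADIOTAP_DATA_RETRIES  17            /* it_present bit for the retry counter */
#define RADIOTAP_F_TX_FAIL     0x0001        /* excessive retries, frame not ACKed */

struct tx_status_rtap {                      /* mirrors the on-air layout, little-endian */
        uint8_t  it_version;
        uint8_t  it_pad;
        uint16_t it_len;
        uint32_t it_present;
        uint16_t tx_flags;
        uint8_t  data_retries;
} __attribute__((packed));

int main(void)
{
        struct tx_status_rtap h = {
                .it_version   = 0,
                .it_len       = sizeof(h),                    /* little-endian host assumed */
                .it_present   = (1u << RADIOTAP_TX_FLAGS) |
                                (1u << RADIOTAP_DATA_RETRIES),
                .tx_flags     = RADIOTAP_F_TX_FAIL,           /* frame was never ACKed */
                .data_retries = 3,
        };

        printf("%zu-byte TX status radiotap header, it_present=0x%08x\n",
               sizeof(h), (unsigned int)h.it_present);
        return 0;
}
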
@@ -4619,6 +4887,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
4619 ((sizeof(struct ieee80211_local) + 4887 ((sizeof(struct ieee80211_local) +
4620 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 4888 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4621 4889
4890 BUG_ON(!ops->tx);
4891 BUG_ON(!ops->config);
4892 BUG_ON(!ops->add_interface);
4622 local->ops = ops; 4893 local->ops = ops;
4623 4894
4624 /* for now, mdev needs sub_if_data :/ */ 4895 /* for now, mdev needs sub_if_data :/ */
@@ -4647,8 +4918,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
4647 local->short_retry_limit = 7; 4918 local->short_retry_limit = 7;
4648 local->long_retry_limit = 4; 4919 local->long_retry_limit = 4;
4649 local->hw.conf.radio_enabled = 1; 4920 local->hw.conf.radio_enabled = 1;
4650 local->rate_ctrl_num_up = RATE_CONTROL_NUM_UP;
4651 local->rate_ctrl_num_down = RATE_CONTROL_NUM_DOWN;
4652 4921
4653 local->enabled_modes = (unsigned int) -1; 4922 local->enabled_modes = (unsigned int) -1;
4654 4923
@@ -4712,6 +4981,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
4712 goto fail_workqueue; 4981 goto fail_workqueue;
4713 } 4982 }
4714 4983
4984 /*
4985 * The hardware needs headroom for sending the frame,
4986 * and we need some headroom for passing the frame to monitor
4987 * interfaces, but never both at the same time.
4988 */
4989 local->tx_headroom = max(local->hw.extra_tx_headroom,
4990 sizeof(struct ieee80211_tx_status_rtap_hdr));
4991
4715 debugfs_hw_add(local); 4992 debugfs_hw_add(local);
4716 4993
4717 local->hw.conf.beacon_int = 1000; 4994 local->hw.conf.beacon_int = 1000;
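
The registration change above sizes local->tx_headroom as the larger of the driver's extra_tx_headroom and the TX status radiotap header, because any given frame only ever needs one of the two reservations. A tiny sketch with hypothetical sizes (the 11-byte header length is illustrative):

#include <stdio.h>

#define TX_STATUS_RTAP_LEN 11u   /* illustrative size of the TX status radiotap header */

int main(void)
{
        unsigned int extra_tx_headroom = 2;   /* hypothetical driver requirement */
        unsigned int tx_headroom = extra_tx_headroom > TX_STATUS_RTAP_LEN ?
                                   extra_tx_headroom : TX_STATUS_RTAP_LEN;

        printf("tx_headroom = %u bytes\n", tx_headroom);   /* max of the two, not the sum */
        return 0;
}
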
diff --git a/net/mac80211/ieee80211_common.h b/net/mac80211/ieee80211_common.h
index b9a73e7f5f75..77c6afb7f6a8 100644
--- a/net/mac80211/ieee80211_common.h
+++ b/net/mac80211/ieee80211_common.h
@@ -47,21 +47,16 @@ enum ieee80211_msg_type {
47 ieee80211_msg_normal = 0, 47 ieee80211_msg_normal = 0,
48 ieee80211_msg_tx_callback_ack = 1, 48 ieee80211_msg_tx_callback_ack = 1,
49 ieee80211_msg_tx_callback_fail = 2, 49 ieee80211_msg_tx_callback_fail = 2,
50 ieee80211_msg_passive_scan = 3, 50 /* hole at 3, was ieee80211_msg_passive_scan but unused */
51 ieee80211_msg_wep_frame_unknown_key = 4, 51 ieee80211_msg_wep_frame_unknown_key = 4,
52 ieee80211_msg_michael_mic_failure = 5, 52 ieee80211_msg_michael_mic_failure = 5,
53 /* hole at 6, was monitor but never sent to userspace */ 53 /* hole at 6, was monitor but never sent to userspace */
54 ieee80211_msg_sta_not_assoc = 7, 54 ieee80211_msg_sta_not_assoc = 7,
55 ieee80211_msg_set_aid_for_sta = 8 /* used by Intersil MVC driver */, 55 /* 8 was ieee80211_msg_set_aid_for_sta */
56 ieee80211_msg_key_threshold_notification = 9, 56 ieee80211_msg_key_threshold_notification = 9,
57 ieee80211_msg_radar = 11, 57 ieee80211_msg_radar = 11,
58}; 58};
59 59
60struct ieee80211_msg_set_aid_for_sta {
61 char sta_address[ETH_ALEN];
62 u16 aid;
63};
64
65struct ieee80211_msg_key_notification { 60struct ieee80211_msg_key_notification {
66 int tx_rx_count; 61 int tx_rx_count;
67 char ifname[IFNAMSIZ]; 62 char ifname[IFNAMSIZ];
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index af4d14d0b969..055a2a912185 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -99,6 +99,12 @@ struct ieee80211_sta_bss {
99 int probe_resp; 99 int probe_resp;
100 unsigned long last_update; 100 unsigned long last_update;
101 101
102 /* during association, we save an ERP value from a probe response so
103 * that we can feed ERP info to the driver when handling of the
104 * association completes. These fields probably won't be up to date
105 * otherwise, so you probably don't want to use them. */
106 int has_erp_value;
107 u8 erp_value;
102}; 108};
103 109
104 110
@@ -235,7 +241,6 @@ struct ieee80211_if_sta {
235 unsigned int authenticated:1; 241 unsigned int authenticated:1;
236 unsigned int associated:1; 242 unsigned int associated:1;
237 unsigned int probereq_poll:1; 243 unsigned int probereq_poll:1;
238 unsigned int use_protection:1;
239 unsigned int create_ibss:1; 244 unsigned int create_ibss:1;
240 unsigned int mixed_cell:1; 245 unsigned int mixed_cell:1;
241 unsigned int wmm_enabled:1; 246 unsigned int wmm_enabled:1;
@@ -278,6 +283,7 @@ struct ieee80211_sub_if_data {
278 int mc_count; 283 int mc_count;
279 unsigned int allmulti:1; 284 unsigned int allmulti:1;
280 unsigned int promisc:1; 285 unsigned int promisc:1;
286 unsigned int use_protection:1; /* CTS protect ERP frames */
281 287
282 struct net_device_stats stats; 288 struct net_device_stats stats;
283 int drop_unencrypted; 289 int drop_unencrypted;
@@ -392,6 +398,7 @@ struct ieee80211_local {
392 int monitors; 398 int monitors;
393 struct iw_statistics wstats; 399 struct iw_statistics wstats;
394 u8 wstats_flags; 400 u8 wstats_flags;
401 int tx_headroom; /* required headroom for hardware/radiotap */
395 402
396 enum { 403 enum {
397 IEEE80211_DEV_UNINITIALIZED = 0, 404 IEEE80211_DEV_UNINITIALIZED = 0,
@@ -437,7 +444,6 @@ struct ieee80211_local {
437 int *basic_rates[NUM_IEEE80211_MODES]; 444 int *basic_rates[NUM_IEEE80211_MODES];
438 445
439 int rts_threshold; 446 int rts_threshold;
440 int cts_protect_erp_frames;
441 int fragmentation_threshold; 447 int fragmentation_threshold;
442 int short_retry_limit; /* dot11ShortRetryLimit */ 448 int short_retry_limit; /* dot11ShortRetryLimit */
443 int long_retry_limit; /* dot11LongRetryLimit */ 449 int long_retry_limit; /* dot11LongRetryLimit */
@@ -513,8 +519,6 @@ struct ieee80211_local {
513 STA_ANTENNA_SEL_SW_CTRL_DEBUG = 2 519 STA_ANTENNA_SEL_SW_CTRL_DEBUG = 2
514 } sta_antenna_sel; 520 } sta_antenna_sel;
515 521
516 int rate_ctrl_num_up, rate_ctrl_num_down;
517
518#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 522#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
519 /* TX/RX handler statistics */ 523 /* TX/RX handler statistics */
520 unsigned int tx_handlers_drop; 524 unsigned int tx_handlers_drop;
@@ -719,6 +723,8 @@ void ieee80211_prepare_rates(struct ieee80211_local *local,
719 struct ieee80211_hw_mode *mode); 723 struct ieee80211_hw_mode *mode);
720void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx); 724void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx);
721int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr); 725int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr);
726int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
727int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
722void ieee80211_if_setup(struct net_device *dev); 728void ieee80211_if_setup(struct net_device *dev);
723void ieee80211_if_mgmt_setup(struct net_device *dev); 729void ieee80211_if_mgmt_setup(struct net_device *dev);
724int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 730int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
diff --git a/net/mac80211/ieee80211_iface.c b/net/mac80211/ieee80211_iface.c
index cf0f32e8c2a2..8532a5ccdd1e 100644
--- a/net/mac80211/ieee80211_iface.c
+++ b/net/mac80211/ieee80211_iface.c
@@ -157,6 +157,8 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
157 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 157 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
158 int oldtype = sdata->type; 158 int oldtype = sdata->type;
159 159
160 dev->hard_start_xmit = ieee80211_subif_start_xmit;
161
160 sdata->type = type; 162 sdata->type = type;
161 switch (type) { 163 switch (type) {
162 case IEEE80211_IF_TYPE_WDS: 164 case IEEE80211_IF_TYPE_WDS:
@@ -196,6 +198,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
196 } 198 }
197 case IEEE80211_IF_TYPE_MNTR: 199 case IEEE80211_IF_TYPE_MNTR:
198 dev->type = ARPHRD_IEEE80211_RADIOTAP; 200 dev->type = ARPHRD_IEEE80211_RADIOTAP;
201 dev->hard_start_xmit = ieee80211_monitor_start_xmit;
199 break; 202 break;
200 default: 203 default:
201 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", 204 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x",
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 66e8a976b311..5918dd079e12 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -345,6 +345,8 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
345{ 345{
346 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 346 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
347 struct iw_range *range = (struct iw_range *) extra; 347 struct iw_range *range = (struct iw_range *) extra;
348 struct ieee80211_hw_mode *mode = NULL;
349 int c = 0;
348 350
349 data->length = sizeof(struct iw_range); 351 data->length = sizeof(struct iw_range);
350 memset(range, 0, sizeof(struct iw_range)); 352 memset(range, 0, sizeof(struct iw_range));
@@ -378,6 +380,29 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
378 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 380 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
379 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 381 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
380 382
383 list_for_each_entry(mode, &local->modes_list, list) {
384 int i = 0;
385
386 if (!(local->enabled_modes & (1 << mode->mode)) ||
387 (local->hw_modes & local->enabled_modes &
388 (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B))
389 continue;
390
391 while (i < mode->num_channels && c < IW_MAX_FREQUENCIES) {
392 struct ieee80211_channel *chan = &mode->channels[i];
393
394 if (chan->flag & IEEE80211_CHAN_W_SCAN) {
395 range->freq[c].i = chan->chan;
396 range->freq[c].m = chan->freq * 100000;
397 range->freq[c].e = 1;
398 c++;
399 }
400 i++;
401 }
402 }
403 range->num_channels = c;
404 range->num_frequency = c;
405
381 IW_EVENT_CAPA_SET_KERNEL(range->event_capa); 406 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
382 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY); 407 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
383 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); 408 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
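
The channel loop added to giwrange reports each scannable channel as a Wireless Extensions frequency, encoded as mantissa times ten to the exponent; a channel frequency in MHz becomes m = MHz * 100000 with e = 1, i.e. the value in Hz. A short sketch of that encoding (the struct below only stands in for the m/e/i members of struct iw_freq):

#include <stdio.h>

struct iw_freq_sketch {      /* stand-in for the m/e/i members of struct iw_freq */
        int m;               /* mantissa */
        short e;             /* exponent: value = m * 10^e */
        unsigned char i;     /* channel number */
};

int main(void)
{
        int chan = 6, freq_mhz = 2437;           /* 2.4 GHz channel 6 */
        struct iw_freq_sketch f = {
                .m = freq_mhz * 100000,          /* 243700000 */
                .e = 1,                          /* * 10 -> 2437000000 Hz */
                .i = (unsigned char)chan,
        };

        printf("channel %d: %d * 10^%d Hz\n", f.i, f.m, f.e);
        return 0;
}
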
@@ -838,6 +863,44 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
838} 863}
839 864
840 865
866static int ieee80211_ioctl_siwrate(struct net_device *dev,
867 struct iw_request_info *info,
868 struct iw_param *rate, char *extra)
869{
870 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
871 struct ieee80211_hw_mode *mode;
872 int i;
873 u32 target_rate = rate->value / 100000;
874 struct ieee80211_sub_if_data *sdata;
875
876 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
877 if (!sdata->bss)
878 return -ENODEV;
879 mode = local->oper_hw_mode;
880 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
881 * target_rate = X, rate->fixed = 1 means only rate X
882 * target_rate = X, rate->fixed = 0 means all rates <= X */
883 sdata->bss->max_ratectrl_rateidx = -1;
884 sdata->bss->force_unicast_rateidx = -1;
885 if (rate->value < 0)
886 return 0;
887 for (i = 0; i < mode->num_rates; i++) {
888 struct ieee80211_rate *rates = &mode->rates[i];
889 int this_rate = rates->rate;
890
891 if (mode->mode == MODE_ATHEROS_TURBO ||
892 mode->mode == MODE_ATHEROS_TURBOG)
893 this_rate *= 2;
894 if (target_rate == this_rate) {
895 sdata->bss->max_ratectrl_rateidx = i;
896 if (rate->fixed)
897 sdata->bss->force_unicast_rateidx = i;
898 break;
899 }
900 }
901 return 0;
902}
903
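
ieee80211_ioctl_siwrate() above takes the requested rate in bit/s, divides by 100000 to land in mac80211's 100 kbps units, and then looks for a matching table entry; iw_param.fixed decides whether that entry becomes a hard setting (force_unicast_rateidx) or just an upper bound (max_ratectrl_rateidx). A userspace sketch of the unit conversion and lookup with an illustrative rate table:

#include <stdio.h>

int main(void)
{
        int rates[] = { 10, 20, 55, 110, 60, 120, 240, 540 };   /* 100 kbps units */
        int num_rates = sizeof(rates) / sizeof(rates[0]);
        long requested_bps = 11000000;           /* e.g. "iwconfig wlan0 rate 11M" */
        int fixed = 1;                           /* 1: exactly this rate, 0: cap only */
        long target = requested_bps / 100000;    /* -> 110 */
        int i, idx = -1;

        for (i = 0; i < num_rates; i++) {
                if (rates[i] == target) {
                        idx = i;
                        break;
                }
        }

        if (idx >= 0)
                printf("max_ratectrl_rateidx=%d force_unicast_rateidx=%d\n",
                       idx, fixed ? idx : -1);
        return 0;
}
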
841static int ieee80211_ioctl_giwrate(struct net_device *dev, 904static int ieee80211_ioctl_giwrate(struct net_device *dev,
842 struct iw_request_info *info, 905 struct iw_request_info *info,
843 struct iw_param *rate, char *extra) 906 struct iw_param *rate, char *extra)
@@ -993,118 +1056,6 @@ static int ieee80211_ioctl_giwretry(struct net_device *dev,
993 return 0; 1056 return 0;
994} 1057}
995 1058
996static int ieee80211_ioctl_clear_keys(struct net_device *dev)
997{
998 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
999 struct ieee80211_key_conf key;
1000 int i;
1001 u8 addr[ETH_ALEN];
1002 struct ieee80211_key_conf *keyconf;
1003 struct ieee80211_sub_if_data *sdata;
1004 struct sta_info *sta;
1005
1006 memset(addr, 0xff, ETH_ALEN);
1007 read_lock(&local->sub_if_lock);
1008 list_for_each_entry(sdata, &local->sub_if_list, list) {
1009 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1010 keyconf = NULL;
1011 if (sdata->keys[i] &&
1012 !sdata->keys[i]->force_sw_encrypt &&
1013 local->ops->set_key &&
1014 (keyconf = ieee80211_key_data2conf(local,
1015 sdata->keys[i])))
1016 local->ops->set_key(local_to_hw(local),
1017 DISABLE_KEY, addr,
1018 keyconf, 0);
1019 kfree(keyconf);
1020 ieee80211_key_free(sdata->keys[i]);
1021 sdata->keys[i] = NULL;
1022 }
1023 sdata->default_key = NULL;
1024 }
1025 read_unlock(&local->sub_if_lock);
1026
1027 spin_lock_bh(&local->sta_lock);
1028 list_for_each_entry(sta, &local->sta_list, list) {
1029 keyconf = NULL;
1030 if (sta->key && !sta->key->force_sw_encrypt &&
1031 local->ops->set_key &&
1032 (keyconf = ieee80211_key_data2conf(local, sta->key)))
1033 local->ops->set_key(local_to_hw(local), DISABLE_KEY,
1034 sta->addr, keyconf, sta->aid);
1035 kfree(keyconf);
1036 ieee80211_key_free(sta->key);
1037 sta->key = NULL;
1038 }
1039 spin_unlock_bh(&local->sta_lock);
1040
1041 memset(&key, 0, sizeof(key));
1042 if (local->ops->set_key &&
1043 local->ops->set_key(local_to_hw(local), REMOVE_ALL_KEYS,
1044 NULL, &key, 0))
1045 printk(KERN_DEBUG "%s: failed to remove hwaccel keys\n",
1046 dev->name);
1047
1048 return 0;
1049}
1050
1051
1052static int
1053ieee80211_ioctl_force_unicast_rate(struct net_device *dev,
1054 struct ieee80211_sub_if_data *sdata,
1055 int rate)
1056{
1057 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1058 struct ieee80211_hw_mode *mode;
1059 int i;
1060
1061 if (sdata->type != IEEE80211_IF_TYPE_AP)
1062 return -ENOENT;
1063
1064 if (rate == 0) {
1065 sdata->u.ap.force_unicast_rateidx = -1;
1066 return 0;
1067 }
1068
1069 mode = local->oper_hw_mode;
1070 for (i = 0; i < mode->num_rates; i++) {
1071 if (mode->rates[i].rate == rate) {
1072 sdata->u.ap.force_unicast_rateidx = i;
1073 return 0;
1074 }
1075 }
1076 return -EINVAL;
1077}
1078
1079
1080static int
1081ieee80211_ioctl_max_ratectrl_rate(struct net_device *dev,
1082 struct ieee80211_sub_if_data *sdata,
1083 int rate)
1084{
1085 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1086 struct ieee80211_hw_mode *mode;
1087 int i;
1088
1089 if (sdata->type != IEEE80211_IF_TYPE_AP)
1090 return -ENOENT;
1091
1092 if (rate == 0) {
1093 sdata->u.ap.max_ratectrl_rateidx = -1;
1094 return 0;
1095 }
1096
1097 mode = local->oper_hw_mode;
1098 for (i = 0; i < mode->num_rates; i++) {
1099 if (mode->rates[i].rate == rate) {
1100 sdata->u.ap.max_ratectrl_rateidx = i;
1101 return 0;
1102 }
1103 }
1104 return -EINVAL;
1105}
1106
1107
1108static void ieee80211_key_enable_hwaccel(struct ieee80211_local *local, 1059static void ieee80211_key_enable_hwaccel(struct ieee80211_local *local,
1109 struct ieee80211_key *key) 1060 struct ieee80211_key *key)
1110{ 1061{
@@ -1228,24 +1179,11 @@ static int ieee80211_ioctl_prism2_param(struct net_device *dev,
1228 sdata->ieee802_1x = value; 1179 sdata->ieee802_1x = value;
1229 break; 1180 break;
1230 1181
1231 case PRISM2_PARAM_ANTSEL_TX:
1232 local->hw.conf.antenna_sel_tx = value;
1233 if (ieee80211_hw_config(local))
1234 ret = -EINVAL;
1235 break;
1236
1237 case PRISM2_PARAM_ANTSEL_RX:
1238 local->hw.conf.antenna_sel_rx = value;
1239 if (ieee80211_hw_config(local))
1240 ret = -EINVAL;
1241 break;
1242
1243 case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES: 1182 case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES:
1244 local->cts_protect_erp_frames = value; 1183 if (sdata->type != IEEE80211_IF_TYPE_AP)
1245 break; 1184 ret = -ENOENT;
1246 1185 else
1247 case PRISM2_PARAM_DROP_UNENCRYPTED: 1186 sdata->use_protection = value;
1248 sdata->drop_unencrypted = value;
1249 break; 1187 break;
1250 1188
1251 case PRISM2_PARAM_PREAMBLE: 1189 case PRISM2_PARAM_PREAMBLE:
@@ -1274,10 +1212,6 @@ static int ieee80211_ioctl_prism2_param(struct net_device *dev,
1274 local->next_mode = value; 1212 local->next_mode = value;
1275 break; 1213 break;
1276 1214
1277 case PRISM2_PARAM_CLEAR_KEYS:
1278 ret = ieee80211_ioctl_clear_keys(dev);
1279 break;
1280
1281 case PRISM2_PARAM_RADIO_ENABLED: 1215 case PRISM2_PARAM_RADIO_ENABLED:
1282 ret = ieee80211_ioctl_set_radio_enabled(dev, value); 1216 ret = ieee80211_ioctl_set_radio_enabled(dev, value);
1283 break; 1217 break;
@@ -1292,22 +1226,6 @@ static int ieee80211_ioctl_prism2_param(struct net_device *dev,
1292 local->sta_antenna_sel = value; 1226 local->sta_antenna_sel = value;
1293 break; 1227 break;
1294 1228
1295 case PRISM2_PARAM_FORCE_UNICAST_RATE:
1296 ret = ieee80211_ioctl_force_unicast_rate(dev, sdata, value);
1297 break;
1298
1299 case PRISM2_PARAM_MAX_RATECTRL_RATE:
1300 ret = ieee80211_ioctl_max_ratectrl_rate(dev, sdata, value);
1301 break;
1302
1303 case PRISM2_PARAM_RATE_CTRL_NUM_UP:
1304 local->rate_ctrl_num_up = value;
1305 break;
1306
1307 case PRISM2_PARAM_RATE_CTRL_NUM_DOWN:
1308 local->rate_ctrl_num_down = value;
1309 break;
1310
1311 case PRISM2_PARAM_TX_POWER_REDUCTION: 1229 case PRISM2_PARAM_TX_POWER_REDUCTION:
1312 if (value < 0) 1230 if (value < 0)
1313 ret = -EINVAL; 1231 ret = -EINVAL;
@@ -1387,20 +1305,8 @@ static int ieee80211_ioctl_get_prism2_param(struct net_device *dev,
1387 *param = sdata->ieee802_1x; 1305 *param = sdata->ieee802_1x;
1388 break; 1306 break;
1389 1307
1390 case PRISM2_PARAM_ANTSEL_TX:
1391 *param = local->hw.conf.antenna_sel_tx;
1392 break;
1393
1394 case PRISM2_PARAM_ANTSEL_RX:
1395 *param = local->hw.conf.antenna_sel_rx;
1396 break;
1397
1398 case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES: 1308 case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES:
1399 *param = local->cts_protect_erp_frames; 1309 *param = sdata->use_protection;
1400 break;
1401
1402 case PRISM2_PARAM_DROP_UNENCRYPTED:
1403 *param = sdata->drop_unencrypted;
1404 break; 1310 break;
1405 1311
1406 case PRISM2_PARAM_PREAMBLE: 1312 case PRISM2_PARAM_PREAMBLE:
@@ -1426,14 +1332,6 @@ static int ieee80211_ioctl_get_prism2_param(struct net_device *dev,
1426 *param = local->sta_antenna_sel; 1332 *param = local->sta_antenna_sel;
1427 break; 1333 break;
1428 1334
1429 case PRISM2_PARAM_RATE_CTRL_NUM_UP:
1430 *param = local->rate_ctrl_num_up;
1431 break;
1432
1433 case PRISM2_PARAM_RATE_CTRL_NUM_DOWN:
1434 *param = local->rate_ctrl_num_down;
1435 break;
1436
1437 case PRISM2_PARAM_TX_POWER_REDUCTION: 1335 case PRISM2_PARAM_TX_POWER_REDUCTION:
1438 *param = local->hw.conf.tx_power_reduction; 1336 *param = local->hw.conf.tx_power_reduction;
1439 break; 1337 break;
@@ -1801,7 +1699,7 @@ static const iw_handler ieee80211_handler[] =
1801 (iw_handler) NULL, /* SIOCGIWNICKN */ 1699 (iw_handler) NULL, /* SIOCGIWNICKN */
1802 (iw_handler) NULL, /* -- hole -- */ 1700 (iw_handler) NULL, /* -- hole -- */
1803 (iw_handler) NULL, /* -- hole -- */ 1701 (iw_handler) NULL, /* -- hole -- */
1804 (iw_handler) NULL, /* SIOCSIWRATE */ 1702 (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */
1805 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */ 1703 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */
1806 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */ 1704 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */
1807 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */ 1705 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 91b545c144c1..ba2bf8f0a347 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -76,33 +76,36 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
76 76
77/* Parsed Information Elements */ 77/* Parsed Information Elements */
78struct ieee802_11_elems { 78struct ieee802_11_elems {
79 /* pointers to IEs */
79 u8 *ssid; 80 u8 *ssid;
80 u8 ssid_len;
81 u8 *supp_rates; 81 u8 *supp_rates;
82 u8 supp_rates_len;
83 u8 *fh_params; 82 u8 *fh_params;
84 u8 fh_params_len;
85 u8 *ds_params; 83 u8 *ds_params;
86 u8 ds_params_len;
87 u8 *cf_params; 84 u8 *cf_params;
88 u8 cf_params_len;
89 u8 *tim; 85 u8 *tim;
90 u8 tim_len;
91 u8 *ibss_params; 86 u8 *ibss_params;
92 u8 ibss_params_len;
93 u8 *challenge; 87 u8 *challenge;
94 u8 challenge_len;
95 u8 *wpa; 88 u8 *wpa;
96 u8 wpa_len;
97 u8 *rsn; 89 u8 *rsn;
98 u8 rsn_len;
99 u8 *erp_info; 90 u8 *erp_info;
100 u8 erp_info_len;
101 u8 *ext_supp_rates; 91 u8 *ext_supp_rates;
102 u8 ext_supp_rates_len;
103 u8 *wmm_info; 92 u8 *wmm_info;
104 u8 wmm_info_len;
105 u8 *wmm_param; 93 u8 *wmm_param;
94
95 /* length of them, respectively */
96 u8 ssid_len;
97 u8 supp_rates_len;
98 u8 fh_params_len;
99 u8 ds_params_len;
100 u8 cf_params_len;
101 u8 tim_len;
102 u8 ibss_params_len;
103 u8 challenge_len;
104 u8 wpa_len;
105 u8 rsn_len;
106 u8 erp_info_len;
107 u8 ext_supp_rates_len;
108 u8 wmm_info_len;
106 u8 wmm_param_len; 109 u8 wmm_param_len;
107}; 110};
108 111
@@ -311,6 +314,25 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
311} 314}
312 315
313 316
317static void ieee80211_handle_erp_ie(struct net_device *dev, u8 erp_value)
318{
319 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
320 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
321 int use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
322
323 if (use_protection != sdata->use_protection) {
324 if (net_ratelimit()) {
325 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
326 MAC_FMT ")\n",
327 dev->name,
328 use_protection ? "enabled" : "disabled",
329 MAC_ARG(ifsta->bssid));
330 }
331 sdata->use_protection = use_protection;
332 }
333}
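
ieee80211_handle_erp_ie() keys the CTS-protection decision off the Use Protection bit of the ERP information element. A minimal sketch of that bit test; the bit positions are written out from the 802.11g ERP element definition rather than pulled from kernel headers:

#include <stdio.h>

#define ERP_NON_ERP_PRESENT  (1 << 0)   /* a non-ERP (802.11b) station is present */
#define ERP_USE_PROTECTION   (1 << 1)   /* ERP transmissions should use protection */
#define ERP_BARKER_PREAMBLE  (1 << 2)   /* long preambles required */

int main(void)
{
        unsigned char erp_value = 0x03;     /* example beacon ERP IE payload byte */
        int use_protection = (erp_value & ERP_USE_PROTECTION) != 0;

        printf("CTS protection %s\n", use_protection ? "enabled" : "disabled");
        return 0;
}
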
334
335
314static void ieee80211_sta_send_associnfo(struct net_device *dev, 336static void ieee80211_sta_send_associnfo(struct net_device *dev,
315 struct ieee80211_if_sta *ifsta) 337 struct ieee80211_if_sta *ifsta)
316{ 338{
@@ -366,6 +388,7 @@ static void ieee80211_set_associated(struct net_device *dev,
366 struct ieee80211_if_sta *ifsta, int assoc) 388 struct ieee80211_if_sta *ifsta, int assoc)
367{ 389{
368 union iwreq_data wrqu; 390 union iwreq_data wrqu;
391 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
369 392
370 if (ifsta->associated == assoc) 393 if (ifsta->associated == assoc)
371 return; 394 return;
@@ -374,9 +397,18 @@ static void ieee80211_set_associated(struct net_device *dev,
374 397
375 if (assoc) { 398 if (assoc) {
376 struct ieee80211_sub_if_data *sdata; 399 struct ieee80211_sub_if_data *sdata;
400 struct ieee80211_sta_bss *bss;
377 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 401 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
378 if (sdata->type != IEEE80211_IF_TYPE_STA) 402 if (sdata->type != IEEE80211_IF_TYPE_STA)
379 return; 403 return;
404
405 bss = ieee80211_rx_bss_get(dev, ifsta->bssid);
406 if (bss) {
407 if (bss->has_erp_value)
408 ieee80211_handle_erp_ie(dev, bss->erp_value);
409 ieee80211_rx_bss_put(dev, bss);
410 }
411
380 netif_carrier_on(dev); 412 netif_carrier_on(dev);
381 ifsta->prev_bssid_set = 1; 413 ifsta->prev_bssid_set = 1;
382 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); 414 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
@@ -384,6 +416,7 @@ static void ieee80211_set_associated(struct net_device *dev,
384 ieee80211_sta_send_associnfo(dev, ifsta); 416 ieee80211_sta_send_associnfo(dev, ifsta);
385 } else { 417 } else {
386 netif_carrier_off(dev); 418 netif_carrier_off(dev);
419 sdata->use_protection = 0;
387 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); 420 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
388 } 421 }
389 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 422 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
@@ -1174,6 +1207,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
1174 return; 1207 return;
1175 } 1208 }
1176 1209
1210 /* it probably doesn't, but if the frame includes an ERP value then
1211 * update our stored copy */
1212 if (elems.erp_info && elems.erp_info_len >= 1) {
1213 struct ieee80211_sta_bss *bss
1214 = ieee80211_rx_bss_get(dev, ifsta->bssid);
1215 if (bss) {
1216 bss->erp_value = elems.erp_info[0];
1217 bss->has_erp_value = 1;
1218 ieee80211_rx_bss_put(dev, bss);
1219 }
1220 }
1221
1177 printk(KERN_DEBUG "%s: associated\n", dev->name); 1222 printk(KERN_DEBUG "%s: associated\n", dev->name);
1178 ifsta->aid = aid; 1223 ifsta->aid = aid;
1179 ifsta->ap_capab = capab_info; 1224 ifsta->ap_capab = capab_info;
@@ -1496,6 +1541,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1496 return; 1541 return;
1497 } 1542 }
1498 1543
1544 /* save the ERP value so that it is available at association time */
1545 if (elems.erp_info && elems.erp_info_len >= 1) {
1546 bss->erp_value = elems.erp_info[0];
1547 bss->has_erp_value = 1;
1548 }
1549
1499 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 1550 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
1500 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 1551 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
1501 if (elems.ssid && elems.ssid_len <= IEEE80211_MAX_SSID_LEN) { 1552 if (elems.ssid && elems.ssid_len <= IEEE80211_MAX_SSID_LEN) {
@@ -1611,10 +1662,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
1611 size_t len, 1662 size_t len,
1612 struct ieee80211_rx_status *rx_status) 1663 struct ieee80211_rx_status *rx_status)
1613{ 1664{
1614 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1615 struct ieee80211_sub_if_data *sdata; 1665 struct ieee80211_sub_if_data *sdata;
1616 struct ieee80211_if_sta *ifsta; 1666 struct ieee80211_if_sta *ifsta;
1617 int use_protection;
1618 size_t baselen; 1667 size_t baselen;
1619 struct ieee802_11_elems elems; 1668 struct ieee802_11_elems elems;
1620 1669
@@ -1638,23 +1687,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
1638 &elems) == ParseFailed) 1687 &elems) == ParseFailed)
1639 return; 1688 return;
1640 1689
1641 use_protection = 0; 1690 if (elems.erp_info && elems.erp_info_len >= 1)
1642 if (elems.erp_info && elems.erp_info_len >= 1) { 1691 ieee80211_handle_erp_ie(dev, elems.erp_info[0]);
1643 use_protection =
1644 (elems.erp_info[0] & ERP_INFO_USE_PROTECTION) != 0;
1645 }
1646
1647 if (use_protection != !!ifsta->use_protection) {
1648 if (net_ratelimit()) {
1649 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
1650 MAC_FMT ")\n",
1651 dev->name,
1652 use_protection ? "enabled" : "disabled",
1653 MAC_ARG(ifsta->bssid));
1654 }
1655 ifsta->use_protection = use_protection ? 1 : 0;
1656 local->cts_protect_erp_frames = use_protection;
1657 }
1658 1692
1659 if (elems.wmm_param && ifsta->wmm_enabled) { 1693 if (elems.wmm_param && ifsta->wmm_enabled) {
1660 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 1694 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index 5ae7fc454665..f6780d63b342 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -187,9 +187,13 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
187 } 187 }
188#endif 188#endif
189 189
190 if (per_failed > local->rate_ctrl_num_down) { 190 /*
191 * XXX: Make these configurable once we have an
192 * interface to the rate control algorithms
193 */
194 if (per_failed > RATE_CONTROL_NUM_DOWN) {
191 rate_control_rate_dec(local, sta); 195 rate_control_rate_dec(local, sta);
192 } else if (per_failed < local->rate_ctrl_num_up) { 196 } else if (per_failed < RATE_CONTROL_NUM_UP) {
193 rate_control_rate_inc(local, sta); 197 rate_control_rate_inc(local, sta);
194 } 198 }
195 srctrl->tx_avg_rate_sum += status->control.rate->rate; 199 srctrl->tx_avg_rate_sum += status->control.rate->rate;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index df5e8dab871d..3ac39f1ec775 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -102,6 +102,16 @@ config NF_CT_PROTO_SCTP
102 If you want to compile it as a module, say M here and read 102 If you want to compile it as a module, say M here and read
103 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 103 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
104 104
105config NF_CT_PROTO_UDPLITE
106 tristate 'UDP-Lite protocol connection tracking support (EXPERIMENTAL)'
107 depends on EXPERIMENTAL && NF_CONNTRACK
108 help
109 With this option enabled, the layer 3 independent connection
110 tracking code will be able to do state tracking on UDP-Lite
111 connections.
112
113 To compile it as a module, choose M here. If unsure, say N.
114
105config NF_CONNTRACK_AMANDA 115config NF_CONNTRACK_AMANDA
106 tristate "Amanda backup protocol support" 116 tristate "Amanda backup protocol support"
107 depends on NF_CONNTRACK 117 depends on NF_CONNTRACK
@@ -423,6 +433,13 @@ config NETFILTER_XT_MATCH_CONNBYTES
423 If you want to compile it as a module, say M here and read 433 If you want to compile it as a module, say M here and read
424 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 434 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
425 435
436config NETFILTER_XT_MATCH_CONNLIMIT
437 tristate '"connlimit" match support'
438 depends on NETFILTER_XTABLES
439 ---help---
440 This match allows you to match against the number of parallel
441 connections to a server per client IP address (or address block).
442
426config NETFILTER_XT_MATCH_CONNMARK 443config NETFILTER_XT_MATCH_CONNMARK
427 tristate '"connmark" connection mark match support' 444 tristate '"connmark" connection mark match support'
428 depends on NETFILTER_XTABLES 445 depends on NETFILTER_XTABLES
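
The new connlimit match counts parallel connections per client address or, as the help text says, per address block selected with a prefix mask, so a whole subnet can share one counter. A small sketch of that grouping step (plain C, not the module's internals):

#include <stdint.h>
#include <stdio.h>

/* collapse an IPv4 address to its network under a prefix length */
static uint32_t group_key(uint32_t addr, int prefix)
{
        uint32_t mask = prefix ? ~0u << (32 - prefix) : 0;
        return addr & mask;
}

int main(void)
{
        uint32_t a = 0xc0a80105;    /* 192.168.1.5   */
        uint32_t b = 0xc0a801c8;    /* 192.168.1.200 */

        /* with a /24 mask both clients are counted against the same limit */
        printf("same /24 bucket: %s\n",
               group_key(a, 24) == group_key(b, 24) ? "yes" : "no");
        return 0;
}
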
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 58b4245a1723..0c054bf27973 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
16# SCTP protocol connection tracking 16# SCTP protocol connection tracking
17obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o 17obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
18obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o 18obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
19obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
19 20
20# netlink interface for nf_conntrack 21# netlink interface for nf_conntrack
21obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o 22obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
@@ -52,6 +53,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
52# matches 53# matches
53obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o 54obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
54obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o 55obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
56obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
55obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o 57obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
56obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o 58obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
57obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o 59obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3d1411012a2c..8cce814f6bee 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -113,6 +113,36 @@ nf_ct_get_tuple(const struct sk_buff *skb,
113} 113}
114EXPORT_SYMBOL_GPL(nf_ct_get_tuple); 114EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
115 115
116int nf_ct_get_tuplepr(const struct sk_buff *skb,
117 unsigned int nhoff,
118 u_int16_t l3num,
119 struct nf_conntrack_tuple *tuple)
120{
121 struct nf_conntrack_l3proto *l3proto;
122 struct nf_conntrack_l4proto *l4proto;
123 unsigned int protoff;
124 u_int8_t protonum;
125 int ret;
126
127 rcu_read_lock();
128
129 l3proto = __nf_ct_l3proto_find(l3num);
130 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
131 if (ret != NF_ACCEPT) {
132 rcu_read_unlock();
133 return 0;
134 }
135
136 l4proto = __nf_ct_l4proto_find(l3num, protonum);
137
138 ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
139 l3proto, l4proto);
140
141 rcu_read_unlock();
142 return ret;
143}
144EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
145
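
nf_ct_get_tuplepr() wraps the existing tuple extraction in two steps: the L3 protocol's get_l4proto() locates the transport header and returns its protocol number, and nf_ct_get_tuple() then fills in the per-protocol parts. A rough userspace analogue for IPv4 over UDP, only to show the shape of the two steps (this is not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* step 1: "get_l4proto" - find protocol number and L4 offset in an IPv4 header */
static int ipv4_get_l4proto(const uint8_t *pkt, unsigned int nhoff,
                            unsigned int *protoff, uint8_t *protonum)
{
        unsigned int ihl = (pkt[nhoff] & 0x0f) * 4;  /* header length in bytes */

        *protonum = pkt[nhoff + 9];                  /* protocol field */
        *protoff  = nhoff + ihl;
        return 1;
}

/* step 2: "pkt_to_tuple" - pull the per-protocol identifiers (UDP ports here) */
static void udp_pkt_to_tuple(const uint8_t *pkt, unsigned int protoff,
                             uint16_t *sport, uint16_t *dport)
{
        *sport = (uint16_t)(pkt[protoff] << 8 | pkt[protoff + 1]);
        *dport = (uint16_t)(pkt[protoff + 2] << 8 | pkt[protoff + 3]);
}

int main(void)
{
        /* minimal fabricated IPv4+UDP packet: IHL=5, proto=17, ports 1024 -> 53 */
        uint8_t pkt[28] = { 0x45, 0, 0, 28, 0, 0, 0, 0, 64, 17, 0, 0,
                            10, 0, 0, 1,  10, 0, 0, 2,
                            0x04, 0x00, 0x00, 0x35, 0, 8, 0, 0 };
        unsigned int protoff;
        uint8_t protonum;
        uint16_t sport, dport;

        ipv4_get_l4proto(pkt, 0, &protoff, &protonum);
        udp_pkt_to_tuple(pkt, protoff, &sport, &dport);
        printf("proto %u, %u -> %u\n", protonum, sport, dport);
        return 0;
}
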
116int 146int
117nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, 147nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
118 const struct nf_conntrack_tuple *orig, 148 const struct nf_conntrack_tuple *orig,
@@ -622,9 +652,12 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
622 652
623 /* rcu_read_lock()ed by nf_hook_slow */ 653 /* rcu_read_lock()ed by nf_hook_slow */
624 l3proto = __nf_ct_l3proto_find((u_int16_t)pf); 654 l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
625 655 ret = l3proto->get_l4proto(*pskb, skb_network_offset(*pskb),
626 if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) { 656 &dataoff, &protonum);
657 if (ret <= 0) {
627 pr_debug("not prepared to track yet or error occured\n"); 658 pr_debug("not prepared to track yet or error occured\n");
659 NF_CT_STAT_INC_ATOMIC(error);
660 NF_CT_STAT_INC_ATOMIC(invalid);
628 return -ret; 661 return -ret;
629 } 662 }
630 663
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index b1bfa207a850..991c52c9a28b 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -61,22 +61,21 @@ static int generic_print_conntrack(struct seq_file *s,
61 return 0; 61 return 0;
62} 62}
63 63
64static int 64static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
65generic_prepare(struct sk_buff **pskb, unsigned int hooknum, 65 unsigned int *dataoff, u_int8_t *protonum)
66 unsigned int *dataoff, u_int8_t *protonum)
67{ 66{
68 /* Never track !!! */ 67 /* Never track !!! */
69 return -NF_ACCEPT; 68 return -NF_ACCEPT;
70} 69}
71 70
72 71
73struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { 72struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = {
74 .l3proto = PF_UNSPEC, 73 .l3proto = PF_UNSPEC,
75 .name = "unknown", 74 .name = "unknown",
76 .pkt_to_tuple = generic_pkt_to_tuple, 75 .pkt_to_tuple = generic_pkt_to_tuple,
77 .invert_tuple = generic_invert_tuple, 76 .invert_tuple = generic_invert_tuple,
78 .print_tuple = generic_print_tuple, 77 .print_tuple = generic_print_tuple,
79 .print_conntrack = generic_print_conntrack, 78 .print_conntrack = generic_print_conntrack,
80 .prepare = generic_prepare, 79 .get_l4proto = generic_get_l4proto,
81}; 80};
82EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); 81EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 6faf1bed7224..d8b501878d9f 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -98,7 +98,7 @@ static struct ctl_table generic_compat_sysctl_table[] = {
98#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 98#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
99#endif /* CONFIG_SYSCTL */ 99#endif /* CONFIG_SYSCTL */
100 100
101struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = 101struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
102{ 102{
103 .l3proto = PF_UNSPEC, 103 .l3proto = PF_UNSPEC,
104 .l4proto = 0, 104 .l4proto = 0,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 771c4c29936e..bdbead8a7a83 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -261,7 +261,7 @@ static void gre_destroy(struct nf_conn *ct)
261} 261}
262 262
263/* protocol helper struct */ 263/* protocol helper struct */
264static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { 264static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
265 .l3proto = AF_INET, 265 .l3proto = AF_INET,
266 .l4proto = IPPROTO_GRE, 266 .l4proto = IPPROTO_GRE,
267 .name = "gre", 267 .name = "gre",
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index debfe61378a1..04192acc7c40 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -601,7 +601,7 @@ static struct ctl_table sctp_compat_sysctl_table[] = {
601#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 601#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
602#endif 602#endif
603 603
604struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { 604static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
605 .l3proto = PF_INET, 605 .l3proto = PF_INET,
606 .l4proto = IPPROTO_SCTP, 606 .l4proto = IPPROTO_SCTP,
607 .name = "sctp", 607 .name = "sctp",
@@ -622,7 +622,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
622#endif 622#endif
623}; 623};
624 624
625struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { 625static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
626 .l3proto = PF_INET6, 626 .l3proto = PF_INET6,
627 .l4proto = IPPROTO_SCTP, 627 .l4proto = IPPROTO_SCTP,
628 .name = "sctp", 628 .name = "sctp",
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 1c8206e6560a..87ad3ccf8aff 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1372,7 +1372,7 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
1372#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 1372#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
1373#endif /* CONFIG_SYSCTL */ 1373#endif /* CONFIG_SYSCTL */
1374 1374
1375struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = 1375struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1376{ 1376{
1377 .l3proto = PF_INET, 1377 .l3proto = PF_INET,
1378 .l4proto = IPPROTO_TCP, 1378 .l4proto = IPPROTO_TCP,
@@ -1401,7 +1401,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
1401}; 1401};
1402EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4); 1402EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
1403 1403
1404struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = 1404struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1405{ 1405{
1406 .l3proto = PF_INET6, 1406 .l3proto = PF_INET6,
1407 .l4proto = IPPROTO_TCP, 1407 .l4proto = IPPROTO_TCP,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3620ecc095fd..13d94a025723 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -191,7 +191,7 @@ static struct ctl_table udp_compat_sysctl_table[] = {
191#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 191#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
192#endif /* CONFIG_SYSCTL */ 192#endif /* CONFIG_SYSCTL */
193 193
194struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = 194struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
195{ 195{
196 .l3proto = PF_INET, 196 .l3proto = PF_INET,
197 .l4proto = IPPROTO_UDP, 197 .l4proto = IPPROTO_UDP,
@@ -218,7 +218,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 {
 	.l3proto = PF_INET6,
 	.l4proto = IPPROTO_UDP,
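The hunks above (gre, sctp, tcp, udp) only add the __read_mostly annotation to the conntrack l4proto descriptors; the structures themselves are unchanged. As a rough, illustrative sketch that is not part of this patch (the real definition lives in the per-architecture cache.h headers), __read_mostly typically expands to a section attribute so that these rarely-written descriptors do not share cache lines with frequently-written data:

/* illustrative only: approximate definition on architectures that provide it */
#define __read_mostly	__attribute__((__section__(".data.read_mostly")))

/* example_l4proto is a hypothetical name, shown only to illustrate placement */
static struct nf_conntrack_l4proto example_l4proto __read_mostly = {
	.l3proto = PF_INET,	/* the object ends up in .data.read_mostly */
};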
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
new file mode 100644
index 000000000000..93e747b5396e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -0,0 +1,266 @@
1/* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2007 Patrick McHardy <kaber@trash.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/types.h>
11#include <linux/timer.h>
12#include <linux/module.h>
13#include <linux/netfilter.h>
14#include <linux/udp.h>
15#include <linux/seq_file.h>
16#include <linux/skbuff.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include <net/checksum.h>
20
21#include <linux/netfilter.h>
22#include <linux/netfilter_ipv4.h>
23#include <linux/netfilter_ipv6.h>
24#include <net/netfilter/nf_conntrack_l4proto.h>
25#include <net/netfilter/nf_conntrack_ecache.h>
26
27static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
28static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
29
30static int udplite_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff,
32 struct nf_conntrack_tuple *tuple)
33{
34 struct udphdr _hdr, *hp;
35
36 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
37 if (hp == NULL)
38 return 0;
39
40 tuple->src.u.udp.port = hp->source;
41 tuple->dst.u.udp.port = hp->dest;
42 return 1;
43}
44
45static int udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
46 const struct nf_conntrack_tuple *orig)
47{
48 tuple->src.u.udp.port = orig->dst.u.udp.port;
49 tuple->dst.u.udp.port = orig->src.u.udp.port;
50 return 1;
51}
52
53/* Print out the per-protocol part of the tuple. */
54static int udplite_print_tuple(struct seq_file *s,
55 const struct nf_conntrack_tuple *tuple)
56{
57 return seq_printf(s, "sport=%hu dport=%hu ",
58 ntohs(tuple->src.u.udp.port),
59 ntohs(tuple->dst.u.udp.port));
60}
61
62/* Print out the private part of the conntrack. */
63static int udplite_print_conntrack(struct seq_file *s,
64 const struct nf_conn *conntrack)
65{
66 return 0;
67}
68
69/* Returns verdict for packet, and may modify conntracktype */
70static int udplite_packet(struct nf_conn *conntrack,
71 const struct sk_buff *skb,
72 unsigned int dataoff,
73 enum ip_conntrack_info ctinfo,
74 int pf,
75 unsigned int hooknum)
76{
77 /* If we've seen traffic both ways, this is some kind of UDP
78 stream. Extend timeout. */
79 if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
80 nf_ct_refresh_acct(conntrack, ctinfo, skb,
81 nf_ct_udplite_timeout_stream);
82 /* Also, more likely to be important, and not a probe */
83 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
84 nf_conntrack_event_cache(IPCT_STATUS, skb);
85 } else
86 nf_ct_refresh_acct(conntrack, ctinfo, skb,
87 nf_ct_udplite_timeout);
88
89 return NF_ACCEPT;
90}
91
92/* Called when a new connection for this protocol found. */
93static int udplite_new(struct nf_conn *conntrack, const struct sk_buff *skb,
94 unsigned int dataoff)
95{
96 return 1;
97}
98
99static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
100 enum ip_conntrack_info *ctinfo,
101 int pf,
102 unsigned int hooknum)
103{
104 unsigned int udplen = skb->len - dataoff;
105 struct udphdr _hdr, *hdr;
106 unsigned int cscov;
107
108 /* Header is too small? */
109 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
110 if (hdr == NULL) {
111 if (LOG_INVALID(IPPROTO_UDPLITE))
112 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
113 "nf_ct_udplite: short packet ");
114 return -NF_ACCEPT;
115 }
116
117 cscov = ntohs(hdr->len);
118 if (cscov == 0)
119 cscov = udplen;
120 else if (cscov < sizeof(*hdr) || cscov > udplen) {
121 if (LOG_INVALID(IPPROTO_UDPLITE))
122 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
123 "nf_ct_udplite: invalid checksum coverage ");
124 return -NF_ACCEPT;
125 }
126
127 /* UDPLITE mandates checksums */
128 if (!hdr->check) {
129 if (LOG_INVALID(IPPROTO_UDPLITE))
130 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
131 "nf_ct_udplite: checksum missing ");
132 return -NF_ACCEPT;
133 }
134
135 /* Checksum invalid? Ignore. */
136 if (nf_conntrack_checksum && !skb_csum_unnecessary(skb) &&
137 ((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
138 (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING))) {
139 if (pf == PF_INET) {
140 struct iphdr *iph = ip_hdr(skb);
141
142 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
143 udplen, IPPROTO_UDPLITE, 0);
144 } else {
145 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
146 __wsum hsum = skb_checksum(skb, 0, dataoff, 0);
147
148 skb->csum = ~csum_unfold(
149 csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
150 udplen, IPPROTO_UDPLITE,
151 csum_sub(0, hsum)));
152 }
153
154 skb->ip_summed = CHECKSUM_NONE;
155 if (__skb_checksum_complete_head(skb, dataoff + cscov)) {
156 if (LOG_INVALID(IPPROTO_UDPLITE))
157 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
158 "nf_ct_udplite: bad UDPLite "
159 "checksum ");
160 return -NF_ACCEPT;
161 }
162 skb->ip_summed = CHECKSUM_UNNECESSARY;
163 }
164
165 return NF_ACCEPT;
166}
167
168#ifdef CONFIG_SYSCTL
169static unsigned int udplite_sysctl_table_users;
170static struct ctl_table_header *udplite_sysctl_header;
171static struct ctl_table udplite_sysctl_table[] = {
172 {
173 .ctl_name = CTL_UNNUMBERED,
174 .procname = "nf_conntrack_udplite_timeout",
175 .data = &nf_ct_udplite_timeout,
176 .maxlen = sizeof(unsigned int),
177 .mode = 0644,
178 .proc_handler = &proc_dointvec_jiffies,
179 },
180 {
181 .ctl_name = CTL_UNNUMBERED,
182 .procname = "nf_conntrack_udplite_timeout_stream",
183 .data = &nf_ct_udplite_timeout_stream,
184 .maxlen = sizeof(unsigned int),
185 .mode = 0644,
186 .proc_handler = &proc_dointvec_jiffies,
187 },
188 {
189 .ctl_name = 0
190 }
191};
192#endif /* CONFIG_SYSCTL */
193
194static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
195{
196 .l3proto = PF_INET,
197 .l4proto = IPPROTO_UDPLITE,
198 .name = "udplite",
199 .pkt_to_tuple = udplite_pkt_to_tuple,
200 .invert_tuple = udplite_invert_tuple,
201 .print_tuple = udplite_print_tuple,
202 .print_conntrack = udplite_print_conntrack,
203 .packet = udplite_packet,
204 .new = udplite_new,
205 .error = udplite_error,
206#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
207 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
208 .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
209#endif
210#ifdef CONFIG_SYSCTL
211 .ctl_table_users = &udplite_sysctl_table_users,
212 .ctl_table_header = &udplite_sysctl_header,
213 .ctl_table = udplite_sysctl_table,
214#endif
215};
216
217static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
218{
219 .l3proto = PF_INET6,
220 .l4proto = IPPROTO_UDPLITE,
221 .name = "udplite",
222 .pkt_to_tuple = udplite_pkt_to_tuple,
223 .invert_tuple = udplite_invert_tuple,
224 .print_tuple = udplite_print_tuple,
225 .print_conntrack = udplite_print_conntrack,
226 .packet = udplite_packet,
227 .new = udplite_new,
228 .error = udplite_error,
229#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
230 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
231 .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
232#endif
233#ifdef CONFIG_SYSCTL
234 .ctl_table_users = &udplite_sysctl_table_users,
235 .ctl_table_header = &udplite_sysctl_header,
236 .ctl_table = udplite_sysctl_table,
237#endif
238};
239
240static int __init nf_conntrack_proto_udplite_init(void)
241{
242 int err;
243
244 err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite4);
245 if (err < 0)
246 goto err1;
247 err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite6);
248 if (err < 0)
249 goto err2;
250 return 0;
251err2:
252 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
253err1:
254 return err;
255}
256
257static void __exit nf_conntrack_proto_udplite_exit(void)
258{
259 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
260 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
261}
262
263module_init(nf_conntrack_proto_udplite_init);
264module_exit(nf_conntrack_proto_udplite_exit);
265
266MODULE_LICENSE("GPL");
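The new UDP-Lite tracker above mirrors the UDP one, with the extra validation in udplite_error(): the UDP-Lite length field is reinterpreted as checksum coverage, and the checksum is mandatory. A stand-alone sketch of the coverage rule being enforced; the helper name and the userspace-style form are illustrative, not part of the patch:

#include <stdbool.h>

/* RFC 3828 coverage rule as checked by udplite_error() above:
 * 0 means the checksum covers the whole datagram; any other value
 * must cover at least the 8-byte header and at most the packet.
 */
static bool udplite_coverage_ok(unsigned int cscov, unsigned int udplen)
{
	if (cscov == 0)
		cscov = udplen;
	return cscov >= 8 && cscov <= udplen;
}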
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
new file mode 100644
index 000000000000..3335dd5be962
--- /dev/null
+++ b/net/netfilter/xt_connlimit.c
@@ -0,0 +1,313 @@
1/*
2 * netfilter module to limit the number of parallel tcp
3 * connections per IP address.
4 * (c) 2000 Gerd Knorr <kraxel@bytesex.org>
5 * Nov 2002: Martin Bene <martin.bene@icomedias.com>:
6 * only ignore TIME_WAIT or gone connections
7 * Copyright © Jan Engelhardt <jengelh@gmx.de>, 2007
8 *
9 * based on ...
10 *
11 * Kernel module to match connection tracking information.
12 * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
13 */
14#include <linux/in.h>
15#include <linux/in6.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/jhash.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/random.h>
22#include <linux/skbuff.h>
23#include <linux/spinlock.h>
24#include <linux/netfilter/nf_conntrack_tcp.h>
25#include <linux/netfilter/x_tables.h>
26#include <linux/netfilter/xt_connlimit.h>
27#include <net/netfilter/nf_conntrack.h>
28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_tuple.h>
30
31/* we will save the tuples of all connections we care about */
32struct xt_connlimit_conn {
33 struct list_head list;
34 struct nf_conntrack_tuple tuple;
35};
36
37struct xt_connlimit_data {
38 struct list_head iphash[256];
39 spinlock_t lock;
40};
41
42static u_int32_t connlimit_rnd;
43static bool connlimit_rnd_inited;
44
45static inline unsigned int connlimit_iphash(u_int32_t addr)
46{
47 if (unlikely(!connlimit_rnd_inited)) {
48 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
49 connlimit_rnd_inited = true;
50 }
51 return jhash_1word(addr, connlimit_rnd) & 0xFF;
52}
53
54static inline unsigned int
55connlimit_iphash6(const union nf_conntrack_address *addr,
56 const union nf_conntrack_address *mask)
57{
58 union nf_conntrack_address res;
59 unsigned int i;
60
61 if (unlikely(!connlimit_rnd_inited)) {
62 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
63 connlimit_rnd_inited = true;
64 }
65
66 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
67 res.ip6[i] = addr->ip6[i] & mask->ip6[i];
68
69 return jhash2(res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
70}
71
72static inline bool already_closed(const struct nf_conn *conn)
73{
74 u_int16_t proto = conn->tuplehash[0].tuple.dst.protonum;
75
76 if (proto == IPPROTO_TCP)
77 return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT;
78 else
79 return 0;
80}
81
82static inline unsigned int
83same_source_net(const union nf_conntrack_address *addr,
84 const union nf_conntrack_address *mask,
85 const union nf_conntrack_address *u3, unsigned int family)
86{
87 if (family == AF_INET) {
88 return (addr->ip & mask->ip) == (u3->ip & mask->ip);
89 } else {
90 union nf_conntrack_address lh, rh;
91 unsigned int i;
92
93 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
94 lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
95 rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
96 }
97
98 return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6)) == 0;
99 }
100}
101
102static int count_them(struct xt_connlimit_data *data,
103 const struct nf_conntrack_tuple *tuple,
104 const union nf_conntrack_address *addr,
105 const union nf_conntrack_address *mask,
106 const struct xt_match *match)
107{
108 struct nf_conntrack_tuple_hash *found;
109 struct xt_connlimit_conn *conn;
110 struct xt_connlimit_conn *tmp;
111 struct nf_conn *found_ct;
112 struct list_head *hash;
113 bool addit = true;
114 int matches = 0;
115
116
117 if (match->family == AF_INET6)
118 hash = &data->iphash[connlimit_iphash6(addr, mask)];
119 else
120 hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
121
122 read_lock_bh(&nf_conntrack_lock);
123
124 /* check the saved connections */
125 list_for_each_entry_safe(conn, tmp, hash, list) {
126 found = __nf_conntrack_find(&conn->tuple, NULL);
127 found_ct = NULL;
128
129 if (found != NULL)
130 found_ct = nf_ct_tuplehash_to_ctrack(found);
131
132 if (found_ct != NULL &&
133 nf_ct_tuple_equal(&conn->tuple, tuple) &&
134 !already_closed(found_ct))
135 /*
136 * Just to be sure we have it only once in the list.
137 * We should not see tuples twice unless someone hooks
138 * this into a table without "-p tcp --syn".
139 */
140 addit = false;
141
142 if (found == NULL) {
143 /* this one is gone */
144 list_del(&conn->list);
145 kfree(conn);
146 continue;
147 }
148
149 if (already_closed(found_ct)) {
150 /*
151 * we do not care about connections which are
152 * closed already -> ditch it
153 */
154 list_del(&conn->list);
155 kfree(conn);
156 continue;
157 }
158
159 if (same_source_net(addr, mask, &conn->tuple.src.u3,
160 match->family))
161 /* same source network -> be counted! */
162 ++matches;
163 }
164
165 read_unlock_bh(&nf_conntrack_lock);
166
167 if (addit) {
168 /* save the new connection in our list */
169 conn = kzalloc(sizeof(*conn), GFP_ATOMIC);
170 if (conn == NULL)
171 return -ENOMEM;
172 conn->tuple = *tuple;
173 list_add(&conn->list, hash);
174 ++matches;
175 }
176
177 return matches;
178}
179
180static bool connlimit_match(const struct sk_buff *skb,
181 const struct net_device *in,
182 const struct net_device *out,
183 const struct xt_match *match,
184 const void *matchinfo, int offset,
185 unsigned int protoff, bool *hotdrop)
186{
187 const struct xt_connlimit_info *info = matchinfo;
188 union nf_conntrack_address addr, mask;
189 struct nf_conntrack_tuple tuple;
190 const struct nf_conntrack_tuple *tuple_ptr = &tuple;
191 enum ip_conntrack_info ctinfo;
192 const struct nf_conn *ct;
193 int connections;
194
195 ct = nf_ct_get(skb, &ctinfo);
196 if (ct != NULL)
197 tuple_ptr = &ct->tuplehash[0].tuple;
198 else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
199 match->family, &tuple))
200 goto hotdrop;
201
202 if (match->family == AF_INET6) {
203 const struct ipv6hdr *iph = ipv6_hdr(skb);
204 memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
205 memcpy(&mask.ip6, info->v6_mask, sizeof(info->v6_mask));
206 } else {
207 const struct iphdr *iph = ip_hdr(skb);
208 addr.ip = iph->saddr;
209 mask.ip = info->v4_mask;
210 }
211
212 spin_lock_bh(&info->data->lock);
213 connections = count_them(info->data, tuple_ptr, &addr, &mask, match);
214 spin_unlock_bh(&info->data->lock);
215
216 if (connections < 0) {
217 /* kmalloc failed, drop it entirely */
218 *hotdrop = true;
219 return false;
220 }
221
222 return (connections > info->limit) ^ info->inverse;
223
224 hotdrop:
225 *hotdrop = true;
226 return false;
227}
228
229static bool connlimit_check(const char *tablename, const void *ip,
230 const struct xt_match *match, void *matchinfo,
231 unsigned int hook_mask)
232{
233 struct xt_connlimit_info *info = matchinfo;
234 unsigned int i;
235
236 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
237 printk(KERN_WARNING "cannot load conntrack support for "
238 "address family %u\n", match->family);
239 return false;
240 }
241
242 /* init private data */
243 info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
244 if (info->data == NULL) {
245 nf_ct_l3proto_module_put(match->family);
246 return false;
247 }
248
249 spin_lock_init(&info->data->lock);
250 for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
251 INIT_LIST_HEAD(&info->data->iphash[i]);
252
253 return true;
254}
255
256static void connlimit_destroy(const struct xt_match *match, void *matchinfo)
257{
258 struct xt_connlimit_info *info = matchinfo;
259 struct xt_connlimit_conn *conn;
260 struct xt_connlimit_conn *tmp;
261 struct list_head *hash = info->data->iphash;
262 unsigned int i;
263
264 nf_ct_l3proto_module_put(match->family);
265
266 for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
267 list_for_each_entry_safe(conn, tmp, &hash[i], list) {
268 list_del(&conn->list);
269 kfree(conn);
270 }
271 }
272
273 kfree(info->data);
274}
275
276static struct xt_match connlimit_reg[] __read_mostly = {
277 {
278 .name = "connlimit",
279 .family = AF_INET,
280 .checkentry = connlimit_check,
281 .match = connlimit_match,
282 .matchsize = sizeof(struct xt_connlimit_info),
283 .destroy = connlimit_destroy,
284 .me = THIS_MODULE,
285 },
286 {
287 .name = "connlimit",
288 .family = AF_INET6,
289 .checkentry = connlimit_check,
290 .match = connlimit_match,
291 .matchsize = sizeof(struct xt_connlimit_info),
292 .destroy = connlimit_destroy,
293 .me = THIS_MODULE,
294 },
295};
296
297static int __init xt_connlimit_init(void)
298{
299 return xt_register_matches(connlimit_reg, ARRAY_SIZE(connlimit_reg));
300}
301
302static void __exit xt_connlimit_exit(void)
303{
304 xt_unregister_matches(connlimit_reg, ARRAY_SIZE(connlimit_reg));
305}
306
307module_init(xt_connlimit_init);
308module_exit(xt_connlimit_exit);
309MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
310MODULE_DESCRIPTION("netfilter xt_connlimit match module");
311MODULE_LICENSE("GPL");
312MODULE_ALIAS("ipt_connlimit");
313MODULE_ALIAS("ip6t_connlimit");
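xt_connlimit above counts existing conntrack entries that share the (masked) source network of the new connection, bucketing sources into 256 hash chains. A minimal sketch of the IPv4 bucket selection mirroring connlimit_iphash(); the names bucket_rnd and pick_bucket are illustrative, and the real code seeds the key lazily with get_random_bytes():

#include <linux/jhash.h>
#include <linux/types.h>

static u32 bucket_rnd;	/* assumed seeded once, as connlimit_iphash() does */

static unsigned int pick_bucket(__be32 saddr, __be32 mask)
{
	/* hash the masked source address into one of 256 chains */
	return jhash_1word((__force u32)(saddr & mask), bucket_rnd) & 0xFF;
}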
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index e5c840c30284..230e35c59786 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -55,7 +55,7 @@ static void rfkill_task_handler(struct work_struct *work)
 
 static void rfkill_schedule_toggle(struct rfkill_task *task)
 {
-	unsigned int flags;
+	unsigned long flags;
 
 	spin_lock_irqsave(&task->lock, flags);
 
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b4662888bdbd..d3f7c3f9407a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -472,12 +472,12 @@ config NET_ACT_SIMP
 
 config NET_CLS_POLICE
 	bool "Traffic Policing (obsolete)"
-	depends on NET_CLS_ACT!=y
+	select NET_CLS_ACT
+	select NET_ACT_POLICE
 	---help---
 	  Say Y here if you want to do traffic policing, i.e. strict
-	  bandwidth limiting. This option is obsoleted by the traffic
-	  policer implemented as action, it stays here for compatibility
-	  reasons.
+	  bandwidth limiting. This option is obsolete and just selects
+	  the option replacing it. It will be removed in the future.
 
 config NET_CLS_IND
 	bool "Incoming device classification"
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 020767a204d4..b67c36f65cf2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= act_police.o
-obj-$(CONFIG_NET_CLS_POLICE)	+= act_police.o
 obj-$(CONFIG_NET_ACT_GACT)	+= act_gact.o
 obj-$(CONFIG_NET_ACT_MIRRED)	+= act_mirred.o
 obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index d20403890877..bf90e60f8411 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -50,7 +50,6 @@ struct tc_police_compat
 
 /* Each policer is serialized by its individual spinlock */
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
 				 int type, struct tc_action *a)
 {
@@ -96,9 +95,8 @@ rtattr_failure:
 	nlmsg_trim(skb, r);
 	goto done;
 }
-#endif
 
-void tcf_police_destroy(struct tcf_police *p)
+static void tcf_police_destroy(struct tcf_police *p)
 {
 	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
 	struct tcf_common **p1p;
@@ -121,7 +119,6 @@ void tcf_police_destroy(struct tcf_police *p)
 	BUG_TRAP(0);
 }
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 				 struct tc_action *a, int ovr, int bind)
 {
@@ -247,10 +244,19 @@ failure:
 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_police *p = a->priv;
+	int ret = 0;
 
-	if (p != NULL)
-		return tcf_police_release(p, bind);
-	return 0;
+	if (p != NULL) {
+		if (bind)
+			p->tcf_bindcnt--;
+
+		p->tcf_refcnt--;
+		if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
+			tcf_police_destroy(p);
+			ret = 1;
+		}
+	}
+	return ret;
 }
 
 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
@@ -372,229 +378,3 @@ police_cleanup_module(void)
372 378
373module_init(police_init_module); 379module_init(police_init_module);
374module_exit(police_cleanup_module); 380module_exit(police_cleanup_module);
375
376#else /* CONFIG_NET_CLS_ACT */
377
378static struct tcf_common *tcf_police_lookup(u32 index)
379{
380 struct tcf_hashinfo *hinfo = &police_hash_info;
381 struct tcf_common *p;
382
383 read_lock(hinfo->lock);
384 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
385 p = p->tcfc_next) {
386 if (p->tcfc_index == index)
387 break;
388 }
389 read_unlock(hinfo->lock);
390
391 return p;
392}
393
394static u32 tcf_police_new_index(void)
395{
396 u32 *idx_gen = &police_idx_gen;
397 u32 val = *idx_gen;
398
399 do {
400 if (++val == 0)
401 val = 1;
402 } while (tcf_police_lookup(val));
403
404 return (*idx_gen = val);
405}
406
407struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
408{
409 unsigned int h;
410 struct tcf_police *police;
411 struct rtattr *tb[TCA_POLICE_MAX];
412 struct tc_police *parm;
413 int size;
414
415 if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
416 return NULL;
417
418 if (tb[TCA_POLICE_TBF-1] == NULL)
419 return NULL;
420 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
421 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
422 return NULL;
423
424 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
425
426 if (parm->index) {
427 struct tcf_common *pc;
428
429 pc = tcf_police_lookup(parm->index);
430 if (pc) {
431 police = to_police(pc);
432 police->tcf_refcnt++;
433 return police;
434 }
435 }
436 police = kzalloc(sizeof(*police), GFP_KERNEL);
437 if (unlikely(!police))
438 return NULL;
439
440 police->tcf_refcnt = 1;
441 spin_lock_init(&police->tcf_lock);
442 if (parm->rate.rate) {
443 police->tcfp_R_tab =
444 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
445 if (police->tcfp_R_tab == NULL)
446 goto failure;
447 if (parm->peakrate.rate) {
448 police->tcfp_P_tab =
449 qdisc_get_rtab(&parm->peakrate,
450 tb[TCA_POLICE_PEAKRATE-1]);
451 if (police->tcfp_P_tab == NULL)
452 goto failure;
453 }
454 }
455 if (tb[TCA_POLICE_RESULT-1]) {
456 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
457 goto failure;
458 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
459 }
460 if (tb[TCA_POLICE_AVRATE-1]) {
461 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
462 goto failure;
463 police->tcfp_ewma_rate =
464 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
465 }
466 police->tcfp_toks = police->tcfp_burst = parm->burst;
467 police->tcfp_mtu = parm->mtu;
468 if (police->tcfp_mtu == 0) {
469 police->tcfp_mtu = ~0;
470 if (police->tcfp_R_tab)
471 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
472 }
473 if (police->tcfp_P_tab)
474 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
475 police->tcfp_t_c = psched_get_time();
476 police->tcf_index = parm->index ? parm->index :
477 tcf_police_new_index();
478 police->tcf_action = parm->action;
479 if (est)
480 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
481 &police->tcf_lock, est);
482 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
483 write_lock_bh(&police_lock);
484 police->tcf_next = tcf_police_ht[h];
485 tcf_police_ht[h] = &police->common;
486 write_unlock_bh(&police_lock);
487 return police;
488
489failure:
490 if (police->tcfp_R_tab)
491 qdisc_put_rtab(police->tcfp_R_tab);
492 kfree(police);
493 return NULL;
494}
495
496int tcf_police(struct sk_buff *skb, struct tcf_police *police)
497{
498 psched_time_t now;
499 long toks;
500 long ptoks = 0;
501
502 spin_lock(&police->tcf_lock);
503
504 police->tcf_bstats.bytes += skb->len;
505 police->tcf_bstats.packets++;
506
507 if (police->tcfp_ewma_rate &&
508 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
509 police->tcf_qstats.overlimits++;
510 spin_unlock(&police->tcf_lock);
511 return police->tcf_action;
512 }
513 if (skb->len <= police->tcfp_mtu) {
514 if (police->tcfp_R_tab == NULL) {
515 spin_unlock(&police->tcf_lock);
516 return police->tcfp_result;
517 }
518
519 now = psched_get_time();
520 toks = psched_tdiff_bounded(now, police->tcfp_t_c,
521 police->tcfp_burst);
522 if (police->tcfp_P_tab) {
523 ptoks = toks + police->tcfp_ptoks;
524 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
525 ptoks = (long)L2T_P(police, police->tcfp_mtu);
526 ptoks -= L2T_P(police, skb->len);
527 }
528 toks += police->tcfp_toks;
529 if (toks > (long)police->tcfp_burst)
530 toks = police->tcfp_burst;
531 toks -= L2T(police, skb->len);
532 if ((toks|ptoks) >= 0) {
533 police->tcfp_t_c = now;
534 police->tcfp_toks = toks;
535 police->tcfp_ptoks = ptoks;
536 spin_unlock(&police->tcf_lock);
537 return police->tcfp_result;
538 }
539 }
540
541 police->tcf_qstats.overlimits++;
542 spin_unlock(&police->tcf_lock);
543 return police->tcf_action;
544}
545EXPORT_SYMBOL(tcf_police);
546
547int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
548{
549 unsigned char *b = skb_tail_pointer(skb);
550 struct tc_police opt;
551
552 opt.index = police->tcf_index;
553 opt.action = police->tcf_action;
554 opt.mtu = police->tcfp_mtu;
555 opt.burst = police->tcfp_burst;
556 if (police->tcfp_R_tab)
557 opt.rate = police->tcfp_R_tab->rate;
558 else
559 memset(&opt.rate, 0, sizeof(opt.rate));
560 if (police->tcfp_P_tab)
561 opt.peakrate = police->tcfp_P_tab->rate;
562 else
563 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
564 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
565 if (police->tcfp_result)
566 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
567 &police->tcfp_result);
568 if (police->tcfp_ewma_rate)
569 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
570 return skb->len;
571
572rtattr_failure:
573 nlmsg_trim(skb, b);
574 return -1;
575}
576
577int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
578{
579 struct gnet_dump d;
580
581 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
582 TCA_XSTATS, &police->tcf_lock,
583 &d) < 0)
584 goto errout;
585
586 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
587 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
588 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
589 goto errout;
590
591 if (gnet_stats_finish_copy(&d) < 0)
592 goto errout;
593
594 return 0;
595
596errout:
597 return -1;
598}
599
600#endif /* CONFIG_NET_CLS_ACT */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 36b72aab1bde..5f0fbca7393f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -458,11 +458,6 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
 		exts->action = NULL;
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police) {
-		tcf_police_release(exts->police, TCA_ACT_UNBIND);
-		exts->police = NULL;
-	}
 #endif
 }
 
@@ -496,17 +491,6 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
 			exts->action = act;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && tb[map->police-1]) {
-		struct tcf_police *p;
-
-		p = tcf_police_locate(tb[map->police-1], rate_tlv);
-		if (p == NULL)
-			return -EINVAL;
-
-		exts->police = p;
-	} else if (map->action && tb[map->action-1])
-		return -EOPNOTSUPP;
 #else
 	if ((map->action && tb[map->action-1]) ||
 	    (map->police && tb[map->police-1]))
@@ -529,15 +513,6 @@ tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		if (act)
 			tcf_action_destroy(act, TCA_ACT_UNBIND);
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (src->police) {
-		struct tcf_police *p;
-		tcf_tree_lock(tp);
-		p = xchg(&dst->police, src->police);
-		tcf_tree_unlock(tp);
-		if (p)
-			tcf_police_release(p, TCA_ACT_UNBIND);
-	}
 #endif
 }
 
@@ -566,17 +541,6 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
 			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && exts->police) {
-		struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
-
-		RTA_PUT(skb, map->police, 0, NULL);
-
-		if (tcf_police_dump(skb, exts->police) < 0)
-			goto rtattr_failure;
-
-		p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
-	}
 #endif
 	return 0;
 rtattr_failure:	__attribute__ ((unused))
@@ -591,10 +555,6 @@ tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
 	if (exts->action)
 		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
 			goto rtattr_failure;
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police)
-		if (tcf_police_dump_stats(skb, exts->police) < 0)
-			goto rtattr_failure;
 #endif
 	return 0;
 rtattr_failure:	__attribute__ ((unused))
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 77961e2314dc..8dbe36912ecb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -782,9 +782,6 @@ static int __init init_u32(void)
 #ifdef CONFIG_CLS_U32_PERF
 	printk(" Performance counters on\n");
 #endif
-#ifdef CONFIG_NET_CLS_POLICE
-	printk(" OLD policer on \n");
-#endif
 #ifdef CONFIG_NET_CLS_IND
 	printk(" input device check on \n");
 #endif
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d92ea26982c5..13c09bc32aa3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -278,11 +278,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	if (spin_trylock(&dev->queue_lock)) {
-		qdisc_run(dev);
-		spin_unlock(&dev->queue_lock);
-	} else
-		netif_schedule(dev);
+	netif_schedule(dev);
 
 	return HRTIMER_NORESTART;
 }
@@ -1149,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
    to this qdisc, (optionally) tests for protocol and asks
    specific classifiers.
  */
+int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+		       struct tcf_result *res)
+{
+	__be16 protocol = skb->protocol;
+	int err = 0;
+
+	for (; tp; tp = tp->next) {
+		if ((tp->protocol == protocol ||
+		     tp->protocol == htons(ETH_P_ALL)) &&
+		    (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
+				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+#endif
+			return err;
+		}
+	}
+	return -1;
+}
+EXPORT_SYMBOL(tc_classify_compat);
+
 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		struct tcf_result *res)
 {
 	int err = 0;
-	__be16 protocol = skb->protocol;
+	__be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
 #endif
 	protocol = skb->protocol;
 
-	for ( ; tp; tp = tp->next) {
-		if ((tp->protocol == protocol ||
-		     tp->protocol == htons(ETH_P_ALL)) &&
-		    (err = tp->classify(skb, tp, res)) >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
-			if ( TC_ACT_RECLASSIFY == err) {
-				__u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
-				tp = otp;
-
-				if (MAX_REC_LOOP < verd++) {
-					printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
-					       tp->prio&0xffff, ntohs(tp->protocol));
-					return TC_ACT_SHOT;
-				}
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
-				goto reclassify;
-			} else {
-				if (skb->tc_verd)
-					skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-				return err;
-			}
-#else
-
-			return err;
-#endif
-		}
-
-	}
-	return -1;
+	err = tc_classify_compat(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+	if (err == TC_ACT_RECLASSIFY) {
+		u32 verd = G_TC_VERD(skb->tc_verd);
+		tp = otp;
+
+		if (verd++ >= MAX_REC_LOOP) {
+			printk("rule prio %u protocol %02x reclassify loop, "
+			       "packet dropped\n",
+			       tp->prio&0xffff, ntohs(tp->protocol));
+			return TC_ACT_SHOT;
+		}
+		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
+		goto reclassify;
+	}
+#endif
+	return err;
 }
+EXPORT_SYMBOL(tc_classify);
 
 void tcf_destroy(struct tcf_proto *tp)
 {
@@ -1256,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
 EXPORT_SYMBOL(unregister_qdisc);
-EXPORT_SYMBOL(tc_classify);
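The sch_api.c change above splits the classifier walk out of tc_classify() into tc_classify_compat(), which returns raw TC_ACT_* verdicts without running the reclassify loop. A minimal sketch of the calling pattern this enables for a qdisc that wants to handle the verdict itself, as sch_atm does in the next file; my_select_class and its return convention are illustrative, only tc_classify_compat() and the TC_ACT_* codes come from the patch:

static unsigned long my_select_class(struct sk_buff *skb, struct tcf_proto *fl)
{
	struct tcf_result res;
	int verdict = tc_classify_compat(skb, fl, &res);

	if (verdict < 0)
		return 0;		/* no filter matched */

	switch (verdict) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_SHOT:
		return 0;		/* caller drops or treats the skb as consumed */
	}
	return res.class;		/* class bound by the matching filter */
}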
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 54b92d22796c..417ec8fb7f1a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -2,7 +2,6 @@
2 2
3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/init.h> 6#include <linux/init.h>
8#include <linux/string.h> 7#include <linux/string.h>
@@ -11,12 +10,11 @@
11#include <linux/atmdev.h> 10#include <linux/atmdev.h>
12#include <linux/atmclip.h> 11#include <linux/atmclip.h>
13#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
14#include <linux/file.h> /* for fput */ 13#include <linux/file.h> /* for fput */
15#include <net/netlink.h> 14#include <net/netlink.h>
16#include <net/pkt_sched.h> 15#include <net/pkt_sched.h>
17 16
18 17extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
19extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
20 18
21#if 0 /* control */ 19#if 0 /* control */
22#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) 20#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
@@ -30,7 +28,6 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
30#define D2PRINTK(format,args...) 28#define D2PRINTK(format,args...)
31#endif 29#endif
32 30
33
34/* 31/*
35 * The ATM queuing discipline provides a framework for invoking classifiers 32 * The ATM queuing discipline provides a framework for invoking classifiers
36 * (aka "filters"), which in turn select classes of this queuing discipline. 33 * (aka "filters"), which in turn select classes of this queuing discipline.
@@ -52,16 +49,15 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
52 * - should lock the flow while there is data in the queue (?) 49 * - should lock the flow while there is data in the queue (?)
53 */ 50 */
54 51
55
56#define PRIV(sch) qdisc_priv(sch) 52#define PRIV(sch) qdisc_priv(sch)
57#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back)) 53#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
58 54
59
60struct atm_flow_data { 55struct atm_flow_data {
61 struct Qdisc *q; /* FIFO, TBF, etc. */ 56 struct Qdisc *q; /* FIFO, TBF, etc. */
62 struct tcf_proto *filter_list; 57 struct tcf_proto *filter_list;
63 struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ 58 struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
64 void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* chaining */ 59 void (*old_pop)(struct atm_vcc *vcc,
60 struct sk_buff * skb); /* chaining */
65 struct atm_qdisc_data *parent; /* parent qdisc */ 61 struct atm_qdisc_data *parent; /* parent qdisc */
66 struct socket *sock; /* for closing */ 62 struct socket *sock; /* for closing */
67 u32 classid; /* x:y type ID */ 63 u32 classid; /* x:y type ID */
@@ -82,76 +78,74 @@ struct atm_qdisc_data {
82 struct tasklet_struct task; /* requeue tasklet */ 78 struct tasklet_struct task; /* requeue tasklet */
83}; 79};
84 80
85
86/* ------------------------- Class/flow operations ------------------------- */ 81/* ------------------------- Class/flow operations ------------------------- */
87 82
88 83static int find_flow(struct atm_qdisc_data *qdisc, struct atm_flow_data *flow)
89static int find_flow(struct atm_qdisc_data *qdisc,struct atm_flow_data *flow)
90{ 84{
91 struct atm_flow_data *walk; 85 struct atm_flow_data *walk;
92 86
93 DPRINTK("find_flow(qdisc %p,flow %p)\n",qdisc,flow); 87 DPRINTK("find_flow(qdisc %p,flow %p)\n", qdisc, flow);
94 for (walk = qdisc->flows; walk; walk = walk->next) 88 for (walk = qdisc->flows; walk; walk = walk->next)
95 if (walk == flow) return 1; 89 if (walk == flow)
90 return 1;
96 DPRINTK("find_flow: not found\n"); 91 DPRINTK("find_flow: not found\n");
97 return 0; 92 return 0;
98} 93}
99 94
100 95static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
101static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
102 u32 classid)
103{ 96{
104 struct atm_qdisc_data *p = PRIV(sch); 97 struct atm_qdisc_data *p = PRIV(sch);
105 struct atm_flow_data *flow; 98 struct atm_flow_data *flow;
106 99
107 for (flow = p->flows; flow; flow = flow->next) 100 for (flow = p->flows; flow; flow = flow->next)
108 if (flow->classid == classid) break; 101 if (flow->classid == classid)
102 break;
109 return flow; 103 return flow;
110} 104}
111 105
112 106static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
113static int atm_tc_graft(struct Qdisc *sch,unsigned long arg, 107 struct Qdisc *new, struct Qdisc **old)
114 struct Qdisc *new,struct Qdisc **old)
115{ 108{
116 struct atm_qdisc_data *p = PRIV(sch); 109 struct atm_qdisc_data *p = PRIV(sch);
117 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 110 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
118 111
119 DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",sch, 112 DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
120 p,flow,new,old); 113 sch, p, flow, new, old);
121 if (!find_flow(p,flow)) return -EINVAL; 114 if (!find_flow(p, flow))
122 if (!new) new = &noop_qdisc; 115 return -EINVAL;
123 *old = xchg(&flow->q,new); 116 if (!new)
124 if (*old) qdisc_reset(*old); 117 new = &noop_qdisc;
118 *old = xchg(&flow->q, new);
119 if (*old)
120 qdisc_reset(*old);
125 return 0; 121 return 0;
126} 122}
127 123
128 124static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
129static struct Qdisc *atm_tc_leaf(struct Qdisc *sch,unsigned long cl)
130{ 125{
131 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 126 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
132 127
133 DPRINTK("atm_tc_leaf(sch %p,flow %p)\n",sch,flow); 128 DPRINTK("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
134 return flow ? flow->q : NULL; 129 return flow ? flow->q : NULL;
135} 130}
136 131
137 132static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
138static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid)
139{ 133{
140 struct atm_qdisc_data *p __attribute__((unused)) = PRIV(sch); 134 struct atm_qdisc_data *p __maybe_unused = PRIV(sch);
141 struct atm_flow_data *flow; 135 struct atm_flow_data *flow;
142 136
143 DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid); 137 DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
144 flow = lookup_flow(sch,classid); 138 flow = lookup_flow(sch, classid);
145 if (flow) flow->ref++; 139 if (flow)
146 DPRINTK("atm_tc_get: flow %p\n",flow); 140 flow->ref++;
147 return (unsigned long) flow; 141 DPRINTK("atm_tc_get: flow %p\n", flow);
142 return (unsigned long)flow;
148} 143}
149 144
150
151static unsigned long atm_tc_bind_filter(struct Qdisc *sch, 145static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
152 unsigned long parent, u32 classid) 146 unsigned long parent, u32 classid)
153{ 147{
154 return atm_tc_get(sch,classid); 148 return atm_tc_get(sch, classid);
155} 149}
156 150
157/* 151/*
@@ -159,72 +153,75 @@ static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
159 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop 153 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
160 * anything that still seems to be in use. 154 * anything that still seems to be in use.
161 */ 155 */
162
163static void atm_tc_put(struct Qdisc *sch, unsigned long cl) 156static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
164{ 157{
165 struct atm_qdisc_data *p = PRIV(sch); 158 struct atm_qdisc_data *p = PRIV(sch);
166 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 159 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
167 struct atm_flow_data **prev; 160 struct atm_flow_data **prev;
168 161
169 DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 162 DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
170 if (--flow->ref) return; 163 if (--flow->ref)
164 return;
171 DPRINTK("atm_tc_put: destroying\n"); 165 DPRINTK("atm_tc_put: destroying\n");
172 for (prev = &p->flows; *prev; prev = &(*prev)->next) 166 for (prev = &p->flows; *prev; prev = &(*prev)->next)
173 if (*prev == flow) break; 167 if (*prev == flow)
168 break;
174 if (!*prev) { 169 if (!*prev) {
175 printk(KERN_CRIT "atm_tc_put: class %p not found\n",flow); 170 printk(KERN_CRIT "atm_tc_put: class %p not found\n", flow);
176 return; 171 return;
177 } 172 }
178 *prev = flow->next; 173 *prev = flow->next;
179 DPRINTK("atm_tc_put: qdisc %p\n",flow->q); 174 DPRINTK("atm_tc_put: qdisc %p\n", flow->q);
180 qdisc_destroy(flow->q); 175 qdisc_destroy(flow->q);
181 tcf_destroy_chain(flow->filter_list); 176 tcf_destroy_chain(flow->filter_list);
182 if (flow->sock) { 177 if (flow->sock) {
183 DPRINTK("atm_tc_put: f_count %d\n", 178 DPRINTK("atm_tc_put: f_count %d\n",
184 file_count(flow->sock->file)); 179 file_count(flow->sock->file));
185 flow->vcc->pop = flow->old_pop; 180 flow->vcc->pop = flow->old_pop;
186 sockfd_put(flow->sock); 181 sockfd_put(flow->sock);
187 } 182 }
188 if (flow->excess) atm_tc_put(sch,(unsigned long) flow->excess); 183 if (flow->excess)
189 if (flow != &p->link) kfree(flow); 184 atm_tc_put(sch, (unsigned long)flow->excess);
185 if (flow != &p->link)
186 kfree(flow);
190 /* 187 /*
191 * If flow == &p->link, the qdisc no longer works at this point and 188 * If flow == &p->link, the qdisc no longer works at this point and
192 * needs to be removed. (By the caller of atm_tc_put.) 189 * needs to be removed. (By the caller of atm_tc_put.)
193 */ 190 */
194} 191}
195 192
196 193static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
197static void sch_atm_pop(struct atm_vcc *vcc,struct sk_buff *skb)
198{ 194{
199 struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent; 195 struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
200 196
201 D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n",vcc,skb,p); 197 D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
202 VCC2FLOW(vcc)->old_pop(vcc,skb); 198 VCC2FLOW(vcc)->old_pop(vcc, skb);
203 tasklet_schedule(&p->task); 199 tasklet_schedule(&p->task);
204} 200}
205 201
206static const u8 llc_oui_ip[] = { 202static const u8 llc_oui_ip[] = {
207 0xaa, /* DSAP: non-ISO */ 203 0xaa, /* DSAP: non-ISO */
208 0xaa, /* SSAP: non-ISO */ 204 0xaa, /* SSAP: non-ISO */
209 0x03, /* Ctrl: Unnumbered Information Command PDU */ 205 0x03, /* Ctrl: Unnumbered Information Command PDU */
210 0x00, /* OUI: EtherType */ 206 0x00, /* OUI: EtherType */
211 0x00, 0x00, 207 0x00, 0x00,
212 0x08, 0x00 }; /* Ethertype IP (0800) */ 208 0x08, 0x00
209}; /* Ethertype IP (0800) */
213 210
214static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, 211static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
215 struct rtattr **tca, unsigned long *arg) 212 struct rtattr **tca, unsigned long *arg)
216{ 213{
217 struct atm_qdisc_data *p = PRIV(sch); 214 struct atm_qdisc_data *p = PRIV(sch);
218 struct atm_flow_data *flow = (struct atm_flow_data *) *arg; 215 struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
219 struct atm_flow_data *excess = NULL; 216 struct atm_flow_data *excess = NULL;
220 struct rtattr *opt = tca[TCA_OPTIONS-1]; 217 struct rtattr *opt = tca[TCA_OPTIONS - 1];
221 struct rtattr *tb[TCA_ATM_MAX]; 218 struct rtattr *tb[TCA_ATM_MAX];
222 struct socket *sock; 219 struct socket *sock;
223 int fd,error,hdr_len; 220 int fd, error, hdr_len;
224 void *hdr; 221 void *hdr;
225 222
226 DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x," 223 DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
227 "flow %p,opt %p)\n",sch,p,classid,parent,flow,opt); 224 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
228 /* 225 /*
229 * The concept of parents doesn't apply for this qdisc. 226 * The concept of parents doesn't apply for this qdisc.
230 */ 227 */
@@ -237,33 +234,36 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
237 * class needs to be removed and a new one added. (This may be changed 234 * class needs to be removed and a new one added. (This may be changed
238 * later.) 235 * later.)
239 */ 236 */
240 if (flow) return -EBUSY; 237 if (flow)
238 return -EBUSY;
241 if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt)) 239 if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt))
242 return -EINVAL; 240 return -EINVAL;
243 if (!tb[TCA_ATM_FD-1] || RTA_PAYLOAD(tb[TCA_ATM_FD-1]) < sizeof(fd)) 241 if (!tb[TCA_ATM_FD - 1] || RTA_PAYLOAD(tb[TCA_ATM_FD - 1]) < sizeof(fd))
244 return -EINVAL; 242 return -EINVAL;
245 fd = *(int *) RTA_DATA(tb[TCA_ATM_FD-1]); 243 fd = *(int *)RTA_DATA(tb[TCA_ATM_FD - 1]);
246 DPRINTK("atm_tc_change: fd %d\n",fd); 244 DPRINTK("atm_tc_change: fd %d\n", fd);
247 if (tb[TCA_ATM_HDR-1]) { 245 if (tb[TCA_ATM_HDR - 1]) {
248 hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR-1]); 246 hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR - 1]);
249 hdr = RTA_DATA(tb[TCA_ATM_HDR-1]); 247 hdr = RTA_DATA(tb[TCA_ATM_HDR - 1]);
250 } 248 } else {
251 else {
252 hdr_len = RFC1483LLC_LEN; 249 hdr_len = RFC1483LLC_LEN;
253 hdr = NULL; /* default LLC/SNAP for IP */ 250 hdr = NULL; /* default LLC/SNAP for IP */
254 } 251 }
255 if (!tb[TCA_ATM_EXCESS-1]) excess = NULL; 252 if (!tb[TCA_ATM_EXCESS - 1])
253 excess = NULL;
256 else { 254 else {
257 if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS-1]) != sizeof(u32)) 255 if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS - 1]) != sizeof(u32))
258 return -EINVAL; 256 return -EINVAL;
259 excess = (struct atm_flow_data *) atm_tc_get(sch, 257 excess = (struct atm_flow_data *)
260 *(u32 *) RTA_DATA(tb[TCA_ATM_EXCESS-1])); 258 atm_tc_get(sch, *(u32 *)RTA_DATA(tb[TCA_ATM_EXCESS - 1]));
261 if (!excess) return -ENOENT; 259 if (!excess)
260 return -ENOENT;
262 } 261 }
263 DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n", 262 DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n",
264 opt->rta_type,RTA_PAYLOAD(opt),hdr_len); 263 opt->rta_type, RTA_PAYLOAD(opt), hdr_len);
265 if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */ 264 if (!(sock = sockfd_lookup(fd, &error)))
266 DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file)); 265 return error; /* f_count++ */
266 DPRINTK("atm_tc_change: f_count %d\n", file_count(sock->file));
267 if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { 267 if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
268 error = -EPROTOTYPE; 268 error = -EPROTOTYPE;
269 goto err_out; 269 goto err_out;
@@ -276,37 +276,37 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
276 error = -EINVAL; 276 error = -EINVAL;
277 goto err_out; 277 goto err_out;
278 } 278 }
279 if (find_flow(p,flow)) { 279 if (find_flow(p, flow)) {
280 error = -EEXIST; 280 error = -EEXIST;
281 goto err_out; 281 goto err_out;
282 } 282 }
283 } 283 } else {
284 else {
285 int i; 284 int i;
286 unsigned long cl; 285 unsigned long cl;
287 286
288 for (i = 1; i < 0x8000; i++) { 287 for (i = 1; i < 0x8000; i++) {
289 classid = TC_H_MAKE(sch->handle,0x8000 | i); 288 classid = TC_H_MAKE(sch->handle, 0x8000 | i);
290 if (!(cl = atm_tc_get(sch,classid))) break; 289 if (!(cl = atm_tc_get(sch, classid)))
291 atm_tc_put(sch,cl); 290 break;
291 atm_tc_put(sch, cl);
292 } 292 }
293 } 293 }
294 DPRINTK("atm_tc_change: new id %x\n",classid); 294 DPRINTK("atm_tc_change: new id %x\n", classid);
295 flow = kmalloc(sizeof(struct atm_flow_data)+hdr_len,GFP_KERNEL); 295 flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
296 DPRINTK("atm_tc_change: flow %p\n",flow); 296 DPRINTK("atm_tc_change: flow %p\n", flow);
297 if (!flow) { 297 if (!flow) {
298 error = -ENOBUFS; 298 error = -ENOBUFS;
299 goto err_out; 299 goto err_out;
300 } 300 }
301 memset(flow,0,sizeof(*flow)); 301 memset(flow, 0, sizeof(*flow));
302 flow->filter_list = NULL; 302 flow->filter_list = NULL;
303 if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid))) 303 if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
304 flow->q = &noop_qdisc; 304 flow->q = &noop_qdisc;
305 DPRINTK("atm_tc_change: qdisc %p\n",flow->q); 305 DPRINTK("atm_tc_change: qdisc %p\n", flow->q);
306 flow->sock = sock; 306 flow->sock = sock;
307 flow->vcc = ATM_SD(sock); /* speedup */ 307 flow->vcc = ATM_SD(sock); /* speedup */
308 flow->vcc->user_back = flow; 308 flow->vcc->user_back = flow;
309 DPRINTK("atm_tc_change: vcc %p\n",flow->vcc); 309 DPRINTK("atm_tc_change: vcc %p\n", flow->vcc);
310 flow->old_pop = flow->vcc->pop; 310 flow->old_pop = flow->vcc->pop;
311 flow->parent = p; 311 flow->parent = p;
312 flow->vcc->pop = sch_atm_pop; 312 flow->vcc->pop = sch_atm_pop;
@@ -317,50 +317,53 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
317 p->link.next = flow; 317 p->link.next = flow;
318 flow->hdr_len = hdr_len; 318 flow->hdr_len = hdr_len;
319 if (hdr) 319 if (hdr)
320 memcpy(flow->hdr,hdr,hdr_len); 320 memcpy(flow->hdr, hdr, hdr_len);
321 else 321 else
322 memcpy(flow->hdr,llc_oui_ip,sizeof(llc_oui_ip)); 322 memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
323 *arg = (unsigned long) flow; 323 *arg = (unsigned long)flow;
324 return 0; 324 return 0;
325err_out: 325err_out:
326 if (excess) atm_tc_put(sch,(unsigned long) excess); 326 if (excess)
327 atm_tc_put(sch, (unsigned long)excess);
327 sockfd_put(sock); 328 sockfd_put(sock);
328 return error; 329 return error;
329} 330}
330 331
331 332static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
332static int atm_tc_delete(struct Qdisc *sch,unsigned long arg)
333{ 333{
334 struct atm_qdisc_data *p = PRIV(sch); 334 struct atm_qdisc_data *p = PRIV(sch);
335 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 335 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
336 336
337 DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 337 DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
338 if (!find_flow(PRIV(sch),flow)) return -EINVAL; 338 if (!find_flow(PRIV(sch), flow))
339 if (flow->filter_list || flow == &p->link) return -EBUSY; 339 return -EINVAL;
340 if (flow->filter_list || flow == &p->link)
341 return -EBUSY;
340 /* 342 /*
341 * Reference count must be 2: one for "keepalive" (set at class 343 * Reference count must be 2: one for "keepalive" (set at class
342 * creation), and one for the reference held when calling delete. 344 * creation), and one for the reference held when calling delete.
343 */ 345 */
344 if (flow->ref < 2) { 346 if (flow->ref < 2) {
345 printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n",flow->ref); 347 printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
346 return -EINVAL; 348 return -EINVAL;
347 } 349 }
348 if (flow->ref > 2) return -EBUSY; /* catch references via excess, etc.*/ 350 if (flow->ref > 2)
349 atm_tc_put(sch,arg); 351 return -EBUSY; /* catch references via excess, etc. */
352 atm_tc_put(sch, arg);
350 return 0; 353 return 0;
351} 354}
352 355
353 356static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
354static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
355{ 357{
356 struct atm_qdisc_data *p = PRIV(sch); 358 struct atm_qdisc_data *p = PRIV(sch);
357 struct atm_flow_data *flow; 359 struct atm_flow_data *flow;
358 360
359 DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker); 361 DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
360 if (walker->stop) return; 362 if (walker->stop)
363 return;
361 for (flow = p->flows; flow; flow = flow->next) { 364 for (flow = p->flows; flow; flow = flow->next) {
362 if (walker->count >= walker->skip) 365 if (walker->count >= walker->skip)
363 if (walker->fn(sch,(unsigned long) flow,walker) < 0) { 366 if (walker->fn(sch, (unsigned long)flow, walker) < 0) {
364 walker->stop = 1; 367 walker->stop = 1;
365 break; 368 break;
366 } 369 }
@@ -368,73 +371,71 @@ static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
368 } 371 }
369} 372}
370 373
371 374static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
372static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl)
373{ 375{
374 struct atm_qdisc_data *p = PRIV(sch); 376 struct atm_qdisc_data *p = PRIV(sch);
375 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 377 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
376 378
377 DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 379 DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
378 return flow ? &flow->filter_list : &p->link.filter_list; 380 return flow ? &flow->filter_list : &p->link.filter_list;
379} 381}
380 382
381
382/* --------------------------- Qdisc operations ---------------------------- */ 383/* --------------------------- Qdisc operations ---------------------------- */
383 384
384 385static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
385static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
386{ 386{
387 struct atm_qdisc_data *p = PRIV(sch); 387 struct atm_qdisc_data *p = PRIV(sch);
388 struct atm_flow_data *flow = NULL ; /* @@@ */ 388 struct atm_flow_data *flow = NULL; /* @@@ */
389 struct tcf_result res; 389 struct tcf_result res;
390 int result; 390 int result;
391 int ret = NET_XMIT_POLICED; 391 int ret = NET_XMIT_POLICED;
392 392
393 D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); 393 D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
394 result = TC_POLICE_OK; /* be nice to gcc */ 394 result = TC_POLICE_OK; /* be nice to gcc */
395 if (TC_H_MAJ(skb->priority) != sch->handle || 395 if (TC_H_MAJ(skb->priority) != sch->handle ||
396 !(flow = (struct atm_flow_data *) atm_tc_get(sch,skb->priority))) 396 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
397 for (flow = p->flows; flow; flow = flow->next) 397 for (flow = p->flows; flow; flow = flow->next)
398 if (flow->filter_list) { 398 if (flow->filter_list) {
399 result = tc_classify(skb,flow->filter_list, 399 result = tc_classify_compat(skb,
400 &res); 400 flow->filter_list,
401 if (result < 0) continue; 401 &res);
402 flow = (struct atm_flow_data *) res.class; 402 if (result < 0)
403 if (!flow) flow = lookup_flow(sch,res.classid); 403 continue;
404 flow = (struct atm_flow_data *)res.class;
405 if (!flow)
406 flow = lookup_flow(sch, res.classid);
404 break; 407 break;
405 } 408 }
406 if (!flow) flow = &p->link; 409 if (!flow)
410 flow = &p->link;
407 else { 411 else {
408 if (flow->vcc) 412 if (flow->vcc)
409 ATM_SKB(skb)->atm_options = flow->vcc->atm_options; 413 ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
410 /*@@@ looks good ... but it's not supposed to work :-)*/ 414 /*@@@ looks good ... but it's not supposed to work :-) */
411#ifdef CONFIG_NET_CLS_POLICE 415#ifdef CONFIG_NET_CLS_ACT
412 switch (result) { 416 switch (result) {
413 case TC_POLICE_SHOT: 417 case TC_ACT_QUEUED:
414 kfree_skb(skb); 418 case TC_ACT_STOLEN:
415 break; 419 kfree_skb(skb);
416 case TC_POLICE_RECLASSIFY: 420 return NET_XMIT_SUCCESS;
417 if (flow->excess) flow = flow->excess; 421 case TC_ACT_SHOT:
418 else { 422 kfree_skb(skb);
419 ATM_SKB(skb)->atm_options |= 423 goto drop;
420 ATM_ATMOPT_CLP; 424 case TC_POLICE_RECLASSIFY:
421 break; 425 if (flow->excess)
422 } 426 flow = flow->excess;
423 /* fall through */ 427 else
424 case TC_POLICE_OK: 428 ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
425 /* fall through */ 429 break;
426 default:
427 break;
428 } 430 }
429#endif 431#endif
430 } 432 }
431 if ( 433
432#ifdef CONFIG_NET_CLS_POLICE 434 if ((ret = flow->q->enqueue(skb, flow->q)) != 0) {
433 result == TC_POLICE_SHOT || 435drop: __maybe_unused
434#endif
435 (ret = flow->q->enqueue(skb,flow->q)) != 0) {
436 sch->qstats.drops++; 436 sch->qstats.drops++;
437 if (flow) flow->qstats.drops++; 437 if (flow)
438 flow->qstats.drops++;
438 return ret; 439 return ret;
439 } 440 }
440 sch->bstats.bytes += skb->len; 441 sch->bstats.bytes += skb->len;
@@ -458,7 +459,6 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
458 return NET_XMIT_BYPASS; 459 return NET_XMIT_BYPASS;
459} 460}
460 461
461
462/* 462/*
463 * Dequeue packets and send them over ATM. Note that we quite deliberately 463 * Dequeue packets and send them over ATM. Note that we quite deliberately
464 * avoid checking net_device's flow control here, simply because sch_atm 464 * avoid checking net_device's flow control here, simply because sch_atm
@@ -466,167 +466,163 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
466 * non-ATM interfaces. 466 * non-ATM interfaces.
467 */ 467 */
468 468
469
470static void sch_atm_dequeue(unsigned long data) 469static void sch_atm_dequeue(unsigned long data)
471{ 470{
472 struct Qdisc *sch = (struct Qdisc *) data; 471 struct Qdisc *sch = (struct Qdisc *)data;
473 struct atm_qdisc_data *p = PRIV(sch); 472 struct atm_qdisc_data *p = PRIV(sch);
474 struct atm_flow_data *flow; 473 struct atm_flow_data *flow;
475 struct sk_buff *skb; 474 struct sk_buff *skb;
476 475
477 D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n",sch,p); 476 D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
478 for (flow = p->link.next; flow; flow = flow->next) 477 for (flow = p->link.next; flow; flow = flow->next)
479 /* 478 /*
480 * If traffic is properly shaped, this won't generate nasty 479 * If traffic is properly shaped, this won't generate nasty
481 * little bursts. Otherwise, it may ... (but that's okay) 480 * little bursts. Otherwise, it may ... (but that's okay)
482 */ 481 */
483 while ((skb = flow->q->dequeue(flow->q))) { 482 while ((skb = flow->q->dequeue(flow->q))) {
484 if (!atm_may_send(flow->vcc,skb->truesize)) { 483 if (!atm_may_send(flow->vcc, skb->truesize)) {
485 (void) flow->q->ops->requeue(skb,flow->q); 484 (void)flow->q->ops->requeue(skb, flow->q);
486 break; 485 break;
487 } 486 }
488 D2PRINTK("atm_tc_dequeue: sending on class %p\n",flow); 487 D2PRINTK("atm_tc_dequeue: sending on class %p\n", flow);
489 /* remove any LL header somebody else has attached */ 488 /* remove any LL header somebody else has attached */
490 skb_pull(skb, skb_network_offset(skb)); 489 skb_pull(skb, skb_network_offset(skb));
491 if (skb_headroom(skb) < flow->hdr_len) { 490 if (skb_headroom(skb) < flow->hdr_len) {
492 struct sk_buff *new; 491 struct sk_buff *new;
493 492
494 new = skb_realloc_headroom(skb,flow->hdr_len); 493 new = skb_realloc_headroom(skb, flow->hdr_len);
495 dev_kfree_skb(skb); 494 dev_kfree_skb(skb);
496 if (!new) continue; 495 if (!new)
496 continue;
497 skb = new; 497 skb = new;
498 } 498 }
499 D2PRINTK("sch_atm_dequeue: ip %p, data %p\n", 499 D2PRINTK("sch_atm_dequeue: ip %p, data %p\n",
500 skb_network_header(skb), skb->data); 500 skb_network_header(skb), skb->data);
501 ATM_SKB(skb)->vcc = flow->vcc; 501 ATM_SKB(skb)->vcc = flow->vcc;
502 memcpy(skb_push(skb,flow->hdr_len),flow->hdr, 502 memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
503 flow->hdr_len); 503 flow->hdr_len);
504 atomic_add(skb->truesize, 504 atomic_add(skb->truesize,
505 &sk_atm(flow->vcc)->sk_wmem_alloc); 505 &sk_atm(flow->vcc)->sk_wmem_alloc);
506 /* atm.atm_options are already set by atm_tc_enqueue */ 506 /* atm.atm_options are already set by atm_tc_enqueue */
507 (void) flow->vcc->send(flow->vcc,skb); 507 flow->vcc->send(flow->vcc, skb);
508 } 508 }
509} 509}
510 510
511
512static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) 511static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
513{ 512{
514 struct atm_qdisc_data *p = PRIV(sch); 513 struct atm_qdisc_data *p = PRIV(sch);
515 struct sk_buff *skb; 514 struct sk_buff *skb;
516 515
517 D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n",sch,p); 516 D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
518 tasklet_schedule(&p->task); 517 tasklet_schedule(&p->task);
519 skb = p->link.q->dequeue(p->link.q); 518 skb = p->link.q->dequeue(p->link.q);
520 if (skb) sch->q.qlen--; 519 if (skb)
520 sch->q.qlen--;
521 return skb; 521 return skb;
522} 522}
523 523
524 524static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
525static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch)
526{ 525{
527 struct atm_qdisc_data *p = PRIV(sch); 526 struct atm_qdisc_data *p = PRIV(sch);
528 int ret; 527 int ret;
529 528
530 D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); 529 D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
531 ret = p->link.q->ops->requeue(skb,p->link.q); 530 ret = p->link.q->ops->requeue(skb, p->link.q);
532 if (!ret) { 531 if (!ret) {
533 sch->q.qlen++; 532 sch->q.qlen++;
534 sch->qstats.requeues++; 533 sch->qstats.requeues++;
535 } else { 534 } else {
536 sch->qstats.drops++; 535 sch->qstats.drops++;
537 p->link.qstats.drops++; 536 p->link.qstats.drops++;
538 } 537 }
539 return ret; 538 return ret;
540} 539}
541 540
542
543static unsigned int atm_tc_drop(struct Qdisc *sch) 541static unsigned int atm_tc_drop(struct Qdisc *sch)
544{ 542{
545 struct atm_qdisc_data *p = PRIV(sch); 543 struct atm_qdisc_data *p = PRIV(sch);
546 struct atm_flow_data *flow; 544 struct atm_flow_data *flow;
547 unsigned int len; 545 unsigned int len;
548 546
549 DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n",sch,p); 547 DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
550 for (flow = p->flows; flow; flow = flow->next) 548 for (flow = p->flows; flow; flow = flow->next)
551 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q))) 549 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
552 return len; 550 return len;
553 return 0; 551 return 0;
554} 552}
555 553
556 554static int atm_tc_init(struct Qdisc *sch, struct rtattr *opt)
557static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt)
558{ 555{
559 struct atm_qdisc_data *p = PRIV(sch); 556 struct atm_qdisc_data *p = PRIV(sch);
560 557
561 DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); 558 DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
562 p->flows = &p->link; 559 p->flows = &p->link;
563 if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops, 560 if (!(p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
564 sch->handle))) 561 sch->handle)))
565 p->link.q = &noop_qdisc; 562 p->link.q = &noop_qdisc;
566 DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q); 563 DPRINTK("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
567 p->link.filter_list = NULL; 564 p->link.filter_list = NULL;
568 p->link.vcc = NULL; 565 p->link.vcc = NULL;
569 p->link.sock = NULL; 566 p->link.sock = NULL;
570 p->link.classid = sch->handle; 567 p->link.classid = sch->handle;
571 p->link.ref = 1; 568 p->link.ref = 1;
572 p->link.next = NULL; 569 p->link.next = NULL;
573 tasklet_init(&p->task,sch_atm_dequeue,(unsigned long) sch); 570 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
574 return 0; 571 return 0;
575} 572}
576 573
577
578static void atm_tc_reset(struct Qdisc *sch) 574static void atm_tc_reset(struct Qdisc *sch)
579{ 575{
580 struct atm_qdisc_data *p = PRIV(sch); 576 struct atm_qdisc_data *p = PRIV(sch);
581 struct atm_flow_data *flow; 577 struct atm_flow_data *flow;
582 578
583 DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n",sch,p); 579 DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
584 for (flow = p->flows; flow; flow = flow->next) qdisc_reset(flow->q); 580 for (flow = p->flows; flow; flow = flow->next)
581 qdisc_reset(flow->q);
585 sch->q.qlen = 0; 582 sch->q.qlen = 0;
586} 583}
587 584
588
589static void atm_tc_destroy(struct Qdisc *sch) 585static void atm_tc_destroy(struct Qdisc *sch)
590{ 586{
591 struct atm_qdisc_data *p = PRIV(sch); 587 struct atm_qdisc_data *p = PRIV(sch);
592 struct atm_flow_data *flow; 588 struct atm_flow_data *flow;
593 589
594 DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p); 590 DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
595 /* races ? */ 591 /* races ? */
596 while ((flow = p->flows)) { 592 while ((flow = p->flows)) {
597 tcf_destroy_chain(flow->filter_list); 593 tcf_destroy_chain(flow->filter_list);
598 flow->filter_list = NULL; 594 flow->filter_list = NULL;
599 if (flow->ref > 1) 595 if (flow->ref > 1)
600 printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow, 596 printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
601 flow->ref); 597 flow->ref);
602 atm_tc_put(sch,(unsigned long) flow); 598 atm_tc_put(sch, (unsigned long)flow);
603 if (p->flows == flow) { 599 if (p->flows == flow) {
604 printk(KERN_ERR "atm_destroy: putting flow %p didn't " 600 printk(KERN_ERR "atm_destroy: putting flow %p didn't "
605 "kill it\n",flow); 601 "kill it\n", flow);
606 p->flows = flow->next; /* brute force */ 602 p->flows = flow->next; /* brute force */
607 break; 603 break;
608 } 604 }
609 } 605 }
610 tasklet_kill(&p->task); 606 tasklet_kill(&p->task);
611} 607}
612 608
613
614static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, 609static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
615 struct sk_buff *skb, struct tcmsg *tcm) 610 struct sk_buff *skb, struct tcmsg *tcm)
616{ 611{
617 struct atm_qdisc_data *p = PRIV(sch); 612 struct atm_qdisc_data *p = PRIV(sch);
618 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 613 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
619 unsigned char *b = skb_tail_pointer(skb); 614 unsigned char *b = skb_tail_pointer(skb);
620 struct rtattr *rta; 615 struct rtattr *rta;
621 616
622 DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", 617 DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
623 sch,p,flow,skb,tcm); 618 sch, p, flow, skb, tcm);
624 if (!find_flow(p,flow)) return -EINVAL; 619 if (!find_flow(p, flow))
620 return -EINVAL;
625 tcm->tcm_handle = flow->classid; 621 tcm->tcm_handle = flow->classid;
626 tcm->tcm_info = flow->q->handle; 622 tcm->tcm_info = flow->q->handle;
627 rta = (struct rtattr *) b; 623 rta = (struct rtattr *)b;
628 RTA_PUT(skb,TCA_OPTIONS,0,NULL); 624 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
629 RTA_PUT(skb,TCA_ATM_HDR,flow->hdr_len,flow->hdr); 625 RTA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
630 if (flow->vcc) { 626 if (flow->vcc) {
631 struct sockaddr_atmpvc pvc; 627 struct sockaddr_atmpvc pvc;
632 int state; 628 int state;
@@ -635,16 +631,16 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
635 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; 631 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
636 pvc.sap_addr.vpi = flow->vcc->vpi; 632 pvc.sap_addr.vpi = flow->vcc->vpi;
637 pvc.sap_addr.vci = flow->vcc->vci; 633 pvc.sap_addr.vci = flow->vcc->vci;
638 RTA_PUT(skb,TCA_ATM_ADDR,sizeof(pvc),&pvc); 634 RTA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
639 state = ATM_VF2VS(flow->vcc->flags); 635 state = ATM_VF2VS(flow->vcc->flags);
640 RTA_PUT(skb,TCA_ATM_STATE,sizeof(state),&state); 636 RTA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state);
641 } 637 }
642 if (flow->excess) 638 if (flow->excess)
643 RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(u32),&flow->classid); 639 RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid);
644 else { 640 else {
645 static u32 zero; 641 static u32 zero;
646 642
647 RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero); 643 RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero);
648 } 644 }
649 rta->rta_len = skb_tail_pointer(skb) - b; 645 rta->rta_len = skb_tail_pointer(skb) - b;
650 return skb->len; 646 return skb->len;
@@ -655,9 +651,9 @@ rtattr_failure:
655} 651}
656static int 652static int
657atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, 653atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
658 struct gnet_dump *d) 654 struct gnet_dump *d)
659{ 655{
660 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 656 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
661 657
662 flow->qstats.qlen = flow->q->q.qlen; 658 flow->qstats.qlen = flow->q->q.qlen;
663 659
@@ -674,38 +670,35 @@ static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
674} 670}
675 671
676static struct Qdisc_class_ops atm_class_ops = { 672static struct Qdisc_class_ops atm_class_ops = {
677 .graft = atm_tc_graft, 673 .graft = atm_tc_graft,
678 .leaf = atm_tc_leaf, 674 .leaf = atm_tc_leaf,
679 .get = atm_tc_get, 675 .get = atm_tc_get,
680 .put = atm_tc_put, 676 .put = atm_tc_put,
681 .change = atm_tc_change, 677 .change = atm_tc_change,
682 .delete = atm_tc_delete, 678 .delete = atm_tc_delete,
683 .walk = atm_tc_walk, 679 .walk = atm_tc_walk,
684 .tcf_chain = atm_tc_find_tcf, 680 .tcf_chain = atm_tc_find_tcf,
685 .bind_tcf = atm_tc_bind_filter, 681 .bind_tcf = atm_tc_bind_filter,
686 .unbind_tcf = atm_tc_put, 682 .unbind_tcf = atm_tc_put,
687 .dump = atm_tc_dump_class, 683 .dump = atm_tc_dump_class,
688 .dump_stats = atm_tc_dump_class_stats, 684 .dump_stats = atm_tc_dump_class_stats,
689}; 685};
690 686
691static struct Qdisc_ops atm_qdisc_ops = { 687static struct Qdisc_ops atm_qdisc_ops = {
692 .next = NULL, 688 .cl_ops = &atm_class_ops,
693 .cl_ops = &atm_class_ops, 689 .id = "atm",
694 .id = "atm", 690 .priv_size = sizeof(struct atm_qdisc_data),
695 .priv_size = sizeof(struct atm_qdisc_data), 691 .enqueue = atm_tc_enqueue,
696 .enqueue = atm_tc_enqueue, 692 .dequeue = atm_tc_dequeue,
697 .dequeue = atm_tc_dequeue, 693 .requeue = atm_tc_requeue,
698 .requeue = atm_tc_requeue, 694 .drop = atm_tc_drop,
699 .drop = atm_tc_drop, 695 .init = atm_tc_init,
700 .init = atm_tc_init, 696 .reset = atm_tc_reset,
701 .reset = atm_tc_reset, 697 .destroy = atm_tc_destroy,
702 .destroy = atm_tc_destroy, 698 .dump = atm_tc_dump,
703 .change = NULL, 699 .owner = THIS_MODULE,
704 .dump = atm_tc_dump,
705 .owner = THIS_MODULE,
706}; 700};
707 701
708
709static int __init atm_init(void) 702static int __init atm_init(void)
710{ 703{
711 return register_qdisc(&atm_qdisc_ops); 704 return register_qdisc(&atm_qdisc_ops);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index b184c3545145..e38c2839b25c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -82,7 +82,7 @@ struct cbq_class
82 unsigned char priority2; /* priority to be used after overlimit */ 82 unsigned char priority2; /* priority to be used after overlimit */
83 unsigned char ewma_log; /* time constant for idle time calculation */ 83 unsigned char ewma_log; /* time constant for idle time calculation */
84 unsigned char ovl_strategy; 84 unsigned char ovl_strategy;
85#ifdef CONFIG_NET_CLS_POLICE 85#ifdef CONFIG_NET_CLS_ACT
86 unsigned char police; 86 unsigned char police;
87#endif 87#endif
88 88
@@ -154,7 +154,7 @@ struct cbq_sched_data
154 struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes 154 struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
155 with backlog */ 155 with backlog */
156 156
157#ifdef CONFIG_NET_CLS_POLICE 157#ifdef CONFIG_NET_CLS_ACT
158 struct cbq_class *rx_class; 158 struct cbq_class *rx_class;
159#endif 159#endif
160 struct cbq_class *tx_class; 160 struct cbq_class *tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
196 return NULL; 196 return NULL;
197} 197}
198 198
199#ifdef CONFIG_NET_CLS_POLICE 199#ifdef CONFIG_NET_CLS_ACT
200 200
201static struct cbq_class * 201static struct cbq_class *
202cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) 202cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
247 /* 247 /*
248 * Step 2+n. Apply classifier. 248 * Step 2+n. Apply classifier.
249 */ 249 */
250 if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0) 250 if (!head->filter_list ||
251 (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
251 goto fallback; 252 goto fallback;
252 253
253 if ((cl = (void*)res.class) == NULL) { 254 if ((cl = (void*)res.class) == NULL) {
@@ -267,15 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
267 *qerr = NET_XMIT_SUCCESS; 268 *qerr = NET_XMIT_SUCCESS;
268 case TC_ACT_SHOT: 269 case TC_ACT_SHOT:
269 return NULL; 270 return NULL;
270 } 271 case TC_ACT_RECLASSIFY:
271#elif defined(CONFIG_NET_CLS_POLICE)
272 switch (result) {
273 case TC_POLICE_RECLASSIFY:
274 return cbq_reclassify(skb, cl); 272 return cbq_reclassify(skb, cl);
275 case TC_POLICE_SHOT:
276 return NULL;
277 default:
278 break;
279 } 273 }
280#endif 274#endif
281 if (cl->level == 0) 275 if (cl->level == 0)
@@ -389,7 +383,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
389 int ret; 383 int ret;
390 struct cbq_class *cl = cbq_classify(skb, sch, &ret); 384 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
391 385
392#ifdef CONFIG_NET_CLS_POLICE 386#ifdef CONFIG_NET_CLS_ACT
393 q->rx_class = cl; 387 q->rx_class = cl;
394#endif 388#endif
395 if (cl == NULL) { 389 if (cl == NULL) {
@@ -399,7 +393,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
399 return ret; 393 return ret;
400 } 394 }
401 395
402#ifdef CONFIG_NET_CLS_POLICE 396#ifdef CONFIG_NET_CLS_ACT
403 cl->q->__parent = sch; 397 cl->q->__parent = sch;
404#endif 398#endif
405 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) { 399 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +428,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
434 428
435 cbq_mark_toplevel(q, cl); 429 cbq_mark_toplevel(q, cl);
436 430
437#ifdef CONFIG_NET_CLS_POLICE 431#ifdef CONFIG_NET_CLS_ACT
438 q->rx_class = cl; 432 q->rx_class = cl;
439 cl->q->__parent = sch; 433 cl->q->__parent = sch;
440#endif 434#endif
@@ -669,9 +663,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
669 return HRTIMER_NORESTART; 663 return HRTIMER_NORESTART;
670} 664}
671 665
672 666#ifdef CONFIG_NET_CLS_ACT
673#ifdef CONFIG_NET_CLS_POLICE
674
675static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) 667static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
676{ 668{
677 int len = skb->len; 669 int len = skb->len;
@@ -1364,7 +1356,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
1364 return 0; 1356 return 0;
1365} 1357}
1366 1358
1367#ifdef CONFIG_NET_CLS_POLICE 1359#ifdef CONFIG_NET_CLS_ACT
1368static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p) 1360static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
1369{ 1361{
1370 cl->police = p->police; 1362 cl->police = p->police;
@@ -1532,7 +1524,7 @@ rtattr_failure:
1532 return -1; 1524 return -1;
1533} 1525}
1534 1526
1535#ifdef CONFIG_NET_CLS_POLICE 1527#ifdef CONFIG_NET_CLS_ACT
1536static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) 1528static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1537{ 1529{
1538 unsigned char *b = skb_tail_pointer(skb); 1530 unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1550,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1558 cbq_dump_rate(skb, cl) < 0 || 1550 cbq_dump_rate(skb, cl) < 0 ||
1559 cbq_dump_wrr(skb, cl) < 0 || 1551 cbq_dump_wrr(skb, cl) < 0 ||
1560 cbq_dump_ovl(skb, cl) < 0 || 1552 cbq_dump_ovl(skb, cl) < 0 ||
1561#ifdef CONFIG_NET_CLS_POLICE 1553#ifdef CONFIG_NET_CLS_ACT
1562 cbq_dump_police(skb, cl) < 0 || 1554 cbq_dump_police(skb, cl) < 0 ||
1563#endif 1555#endif
1564 cbq_dump_fopt(skb, cl) < 0) 1556 cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1645,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1653 cl->classid)) == NULL) 1645 cl->classid)) == NULL)
1654 return -ENOBUFS; 1646 return -ENOBUFS;
1655 } else { 1647 } else {
1656#ifdef CONFIG_NET_CLS_POLICE 1648#ifdef CONFIG_NET_CLS_ACT
1657 if (cl->police == TC_POLICE_RECLASSIFY) 1649 if (cl->police == TC_POLICE_RECLASSIFY)
1658 new->reshape_fail = cbq_reshape_fail; 1650 new->reshape_fail = cbq_reshape_fail;
1659#endif 1651#endif
@@ -1718,7 +1710,7 @@ cbq_destroy(struct Qdisc* sch)
1718 struct cbq_class *cl; 1710 struct cbq_class *cl;
1719 unsigned h; 1711 unsigned h;
1720 1712
1721#ifdef CONFIG_NET_CLS_POLICE 1713#ifdef CONFIG_NET_CLS_ACT
1722 q->rx_class = NULL; 1714 q->rx_class = NULL;
1723#endif 1715#endif
1724 /* 1716 /*
@@ -1747,7 +1739,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
1747 struct cbq_class *cl = (struct cbq_class*)arg; 1739 struct cbq_class *cl = (struct cbq_class*)arg;
1748 1740
1749 if (--cl->refcnt == 0) { 1741 if (--cl->refcnt == 0) {
1750#ifdef CONFIG_NET_CLS_POLICE 1742#ifdef CONFIG_NET_CLS_ACT
1751 struct cbq_sched_data *q = qdisc_priv(sch); 1743 struct cbq_sched_data *q = qdisc_priv(sch);
1752 1744
1753 spin_lock_bh(&sch->dev->queue_lock); 1745 spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1787,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1795 RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt)) 1787 RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
1796 return -EINVAL; 1788 return -EINVAL;
1797 1789
1798#ifdef CONFIG_NET_CLS_POLICE 1790#ifdef CONFIG_NET_CLS_ACT
1799 if (tb[TCA_CBQ_POLICE-1] && 1791 if (tb[TCA_CBQ_POLICE-1] &&
1800 RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police)) 1792 RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
1801 return -EINVAL; 1793 return -EINVAL;
@@ -1838,7 +1830,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1838 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1830 if (tb[TCA_CBQ_OVL_STRATEGY-1])
1839 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1831 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
1840 1832
1841#ifdef CONFIG_NET_CLS_POLICE 1833#ifdef CONFIG_NET_CLS_ACT
1842 if (tb[TCA_CBQ_POLICE-1]) 1834 if (tb[TCA_CBQ_POLICE-1])
1843 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1835 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
1844#endif 1836#endif
@@ -1931,7 +1923,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1931 cl->overlimit = cbq_ovl_classic; 1923 cl->overlimit = cbq_ovl_classic;
1932 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1924 if (tb[TCA_CBQ_OVL_STRATEGY-1])
1933 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1925 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
1934#ifdef CONFIG_NET_CLS_POLICE 1926#ifdef CONFIG_NET_CLS_ACT
1935 if (tb[TCA_CBQ_POLICE-1]) 1927 if (tb[TCA_CBQ_POLICE-1])
1936 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1928 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
1937#endif 1929#endif
@@ -1975,7 +1967,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1975 q->tx_class = NULL; 1967 q->tx_class = NULL;
1976 q->tx_borrowed = NULL; 1968 q->tx_borrowed = NULL;
1977 } 1969 }
1978#ifdef CONFIG_NET_CLS_POLICE 1970#ifdef CONFIG_NET_CLS_ACT
1979 if (q->rx_class == cl) 1971 if (q->rx_class == cl)
1980 q->rx_class = NULL; 1972 q->rx_class = NULL;
1981#endif 1973#endif
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 4d2c233a8611..60f89199e3da 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -237,25 +237,23 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
237 D2PRINTK("result %d class 0x%04x\n", result, res.classid); 237 D2PRINTK("result %d class 0x%04x\n", result, res.classid);
238 238
239 switch (result) { 239 switch (result) {
240#ifdef CONFIG_NET_CLS_POLICE 240#ifdef CONFIG_NET_CLS_ACT
241 case TC_POLICE_SHOT: 241 case TC_ACT_QUEUED:
242 kfree_skb(skb); 242 case TC_ACT_STOLEN:
243 sch->qstats.drops++; 243 kfree_skb(skb);
244 return NET_XMIT_POLICED; 244 return NET_XMIT_SUCCESS;
245#if 0 245 case TC_ACT_SHOT:
246 case TC_POLICE_RECLASSIFY: 246 kfree_skb(skb);
247 /* FIXME: what to do here ??? */ 247 sch->qstats.drops++;
248 return NET_XMIT_BYPASS;
248#endif 249#endif
249#endif 250 case TC_ACT_OK:
250 case TC_POLICE_OK: 251 skb->tc_index = TC_H_MIN(res.classid);
251 skb->tc_index = TC_H_MIN(res.classid); 252 break;
252 break; 253 default:
253 case TC_POLICE_UNSPEC: 254 if (p->default_index != NO_DEFAULT_INDEX)
254 /* fall through */ 255 skb->tc_index = p->default_index;
255 default: 256 break;
256 if (p->default_index != NO_DEFAULT_INDEX)
257 skb->tc_index = p->default_index;
258 break;
259 } 257 }
260 } 258 }
261 259
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 874452c41a01..55e7e4530f43 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1174,9 +1174,6 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
1174 case TC_ACT_SHOT: 1174 case TC_ACT_SHOT:
1175 return NULL; 1175 return NULL;
1176 } 1176 }
1177#elif defined(CONFIG_NET_CLS_POLICE)
1178 if (result == TC_POLICE_SHOT)
1179 return NULL;
1180#endif 1177#endif
1181 if ((cl = (struct hfsc_class *)res.class) == NULL) { 1178 if ((cl = (struct hfsc_class *)res.class) == NULL) {
1182 if ((cl = hfsc_find_class(res.classid, sch)) == NULL) 1179 if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index b417a95df322..246a2f9765f1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -249,9 +249,6 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
249 case TC_ACT_SHOT: 249 case TC_ACT_SHOT:
250 return NULL; 250 return NULL;
251 } 251 }
252#elif defined(CONFIG_NET_CLS_POLICE)
253 if (result == TC_POLICE_SHOT)
254 return HTB_DIRECT;
255#endif 252#endif
256 if ((cl = (void *)res.class) == NULL) { 253 if ((cl = (void *)res.class) == NULL) {
257 if (res.classid == sch->handle) 254 if (res.classid == sch->handle)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index cd0aab6a2a7c..51f16b0af198 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -164,31 +164,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
164 result = TC_ACT_OK; 164 result = TC_ACT_OK;
165 break; 165 break;
166 } 166 }
167/* backward compat */
168#else
169#ifdef CONFIG_NET_CLS_POLICE
170 switch (result) {
171 case TC_POLICE_SHOT:
172 result = NF_DROP;
173 sch->qstats.drops++;
174 break;
175 case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
176 case TC_POLICE_OK:
177 case TC_POLICE_UNSPEC:
178 default:
179 sch->bstats.packets++;
180 sch->bstats.bytes += skb->len;
181 result = NF_ACCEPT;
182 break;
183 }
184
185#else 167#else
186 D2PRINTK("Overriding result to ACCEPT\n"); 168 D2PRINTK("Overriding result to ACCEPT\n");
187 result = NF_ACCEPT; 169 result = NF_ACCEPT;
188 sch->bstats.packets++; 170 sch->bstats.packets++;
189 sch->bstats.bytes += skb->len; 171 sch->bstats.bytes += skb->len;
190#endif 172#endif
191#endif
192 173
193 return result; 174 return result;
194} 175}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 22e431dace54..8c2639af4c6a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
125 125
126 if (skb->len > q->max_size) { 126 if (skb->len > q->max_size) {
127 sch->qstats.drops++; 127 sch->qstats.drops++;
128#ifdef CONFIG_NET_CLS_POLICE 128#ifdef CONFIG_NET_CLS_ACT
129 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 129 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
130#endif 130#endif
131 kfree_skb(skb); 131 kfree_skb(skb);
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 3a96ae60271c..092116e390b6 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_WIRELESS_EXT) += wext.o 1obj-$(CONFIG_WIRELESS_EXT) += wext.o
2obj-$(CONFIG_CFG80211) += cfg80211.o 2obj-$(CONFIG_CFG80211) += cfg80211.o
3 3
4cfg80211-y += core.o sysfs.o 4cfg80211-y += core.o sysfs.o radiotap.o
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
new file mode 100644
index 000000000000..68c11d099917
--- /dev/null
+++ b/net/wireless/radiotap.c
@@ -0,0 +1,257 @@
1/*
2 * Radiotap parser
3 *
4 * Copyright 2007 Andy Green <andy@warmcat.com>
5 */
6
7#include <net/cfg80211.h>
8#include <net/ieee80211_radiotap.h>
9#include <asm/unaligned.h>
10
11/* function prototypes and related defs are in include/net/cfg80211.h */
12
13/**
14 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
15 * @iterator: radiotap_iterator to initialize
16 * @radiotap_header: radiotap header to parse
17 * @max_length: total length we can parse into (eg, whole packet length)
18 *
19 * Returns: 0 or a negative error code if there is a problem.
20 *
21 * This function initializes an opaque iterator struct which can then
22 * be passed to ieee80211_radiotap_iterator_next() to visit every radiotap
23 * argument which is present in the header. It knows about extended
24 * present headers and handles them.
25 *
26 * How to use:
 27 * call ieee80211_radiotap_iterator_init() to init a semi-opaque iterator
28 * struct ieee80211_radiotap_iterator (no need to init the struct beforehand)
29 * checking for a good 0 return code. Then loop calling
 30 * ieee80211_radiotap_iterator_next()... it returns either 0,
31 * -ENOENT if there are no more args to parse, or -EINVAL if there is a problem.
32 * The iterator's @this_arg member points to the start of the argument
33 * associated with the current argument index that is present, which can be
34 * found in the iterator's @this_arg_index member. This arg index corresponds
35 * to the IEEE80211_RADIOTAP_... defines.
36 *
37 * Radiotap header length:
38 * You can find the CPU-endian total radiotap header length in
39 * iterator->max_length after executing ieee80211_radiotap_iterator_init()
40 * successfully.
41 *
42 * Alignment Gotcha:
43 * You must take care when dereferencing iterator.this_arg
44 * for multibyte types... the pointer is not aligned. Use
45 * get_unaligned((type *)iterator.this_arg) to dereference
46 * iterator.this_arg for type "type" safely on all arches.
47 *
48 * Example code:
49 * See Documentation/networking/radiotap-headers.txt
50 */
51
52int ieee80211_radiotap_iterator_init(
53 struct ieee80211_radiotap_iterator *iterator,
54 struct ieee80211_radiotap_header *radiotap_header,
55 int max_length)
56{
57 /* Linux only supports version 0 radiotap format */
58 if (radiotap_header->it_version)
59 return -EINVAL;
60
61 /* sanity check for allowed length and radiotap length field */
62 if (max_length < le16_to_cpu(get_unaligned(&radiotap_header->it_len)))
63 return -EINVAL;
64
65 iterator->rtheader = radiotap_header;
66 iterator->max_length = le16_to_cpu(get_unaligned(
67 &radiotap_header->it_len));
68 iterator->arg_index = 0;
69 iterator->bitmap_shifter = le32_to_cpu(get_unaligned(
70 &radiotap_header->it_present));
71 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
72 iterator->this_arg = NULL;
73
74 /* find payload start allowing for extended bitmap(s) */
75
76 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
77 while (le32_to_cpu(get_unaligned((__le32 *)iterator->arg)) &
78 (1<<IEEE80211_RADIOTAP_EXT)) {
79 iterator->arg += sizeof(u32);
80
81 /*
82 * check for insanity where the present bitmaps
83 * keep claiming to extend up to or even beyond the
84 * stated radiotap header length
85 */
86
87 if (((ulong)iterator->arg -
88 (ulong)iterator->rtheader) > iterator->max_length)
89 return -EINVAL;
90 }
91
92 iterator->arg += sizeof(u32);
93
94 /*
95 * no need to check again for blowing past stated radiotap
96 * header length, because ieee80211_radiotap_iterator_next
97 * checks it before it is dereferenced
98 */
99 }
100
101 /* we are all initialized happily */
102
103 return 0;
104}
105EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
106
107
108/**
109 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
110 * @iterator: radiotap_iterator to move to next arg (if any)
111 *
112 * Returns: 0 if there is an argument to handle,
113 * -ENOENT if there are no more args or -EINVAL
114 * if there is something else wrong.
115 *
116 * This function provides the next radiotap arg index (IEEE80211_RADIOTAP_*)
117 * in @this_arg_index and sets @this_arg to point to the
118 * payload for the field. It takes care of alignment handling and extended
119 * present fields. @this_arg can be changed by the caller (eg,
120 * incremented to move inside a compound argument like
121 * IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in
 122 * little-endian format whatever the endianness of your CPU.
123 *
124 * Alignment Gotcha:
125 * You must take care when dereferencing iterator.this_arg
126 * for multibyte types... the pointer is not aligned. Use
127 * get_unaligned((type *)iterator.this_arg) to dereference
128 * iterator.this_arg for type "type" safely on all arches.
129 */
130
131int ieee80211_radiotap_iterator_next(
132 struct ieee80211_radiotap_iterator *iterator)
133{
134
135 /*
136 * small length lookup table for all radiotap types we heard of
137 * starting from b0 in the bitmap, so we can walk the payload
138 * area of the radiotap header
139 *
140 * There is a requirement to pad args, so that args
141 * of a given length must begin at a boundary of that length
142 * -- but note that compound args are allowed (eg, 2 x u16
143 * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
144 * a reliable indicator of alignment requirement.
145 *
146 * upper nybble: content alignment for arg
147 * lower nybble: content length for arg
148 */
149
150 static const u8 rt_sizes[] = {
151 [IEEE80211_RADIOTAP_TSFT] = 0x88,
152 [IEEE80211_RADIOTAP_FLAGS] = 0x11,
153 [IEEE80211_RADIOTAP_RATE] = 0x11,
154 [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
155 [IEEE80211_RADIOTAP_FHSS] = 0x22,
156 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
157 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
158 [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
159 [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
160 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
161 [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
162 [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
163 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
164 [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11
165 /*
166 * add more here as they are defined in
167 * include/net/ieee80211_radiotap.h
168 */
169 };
170
171 /*
172 * for every radiotap entry we can at
173 * least skip (by knowing the length)...
174 */
175
176 while (iterator->arg_index < sizeof(rt_sizes)) {
177 int hit = 0;
178 int pad;
179
180 if (!(iterator->bitmap_shifter & 1))
181 goto next_entry; /* arg not present */
182
183 /*
184 * arg is present, account for alignment padding
185 * 8-bit args can be at any alignment
186 * 16-bit args must start on 16-bit boundary
187 * 32-bit args must start on 32-bit boundary
188 * 64-bit args must start on 64-bit boundary
189 *
190 * note that total arg size can differ from alignment of
191 * elements inside arg, so we use upper nybble of length
192 * table to base alignment on
193 *
194 * also note: these alignments are ** relative to the
195 * start of the radiotap header **. There is no guarantee
196 * that the radiotap header itself is aligned on any
197 * kind of boundary.
198 *
199 * the above is why get_unaligned() is used to dereference
200 * multibyte elements from the radiotap area
201 */
202
203 pad = (((ulong)iterator->arg) -
204 ((ulong)iterator->rtheader)) &
205 ((rt_sizes[iterator->arg_index] >> 4) - 1);
206
207 if (pad)
208 iterator->arg +=
209 (rt_sizes[iterator->arg_index] >> 4) - pad;
210
211 /*
212 * this is what we will return to user, but we need to
213 * move on first so next call has something fresh to test
214 */
215 iterator->this_arg_index = iterator->arg_index;
216 iterator->this_arg = iterator->arg;
217 hit = 1;
218
219 /* internally move on the size of this arg */
220 iterator->arg += rt_sizes[iterator->arg_index] & 0x0f;
221
222 /*
223 * check for insanity where we are given a bitmap that
224 * claims to have more arg content than the length of the
225 * radiotap section. We will normally end up equalling this
226 * max_length on the last arg, never exceeding it.
227 */
228
229 if (((ulong)iterator->arg - (ulong)iterator->rtheader) >
230 iterator->max_length)
231 return -EINVAL;
232
233 next_entry:
234 iterator->arg_index++;
235 if (unlikely((iterator->arg_index & 31) == 0)) {
236 /* completed current u32 bitmap */
237 if (iterator->bitmap_shifter & 1) {
238 /* b31 was set, there is more */
239 /* move to next u32 bitmap */
240 iterator->bitmap_shifter = le32_to_cpu(
241 get_unaligned(iterator->next_bitmap));
242 iterator->next_bitmap++;
243 } else
244 /* no more bitmaps: end */
245 iterator->arg_index = sizeof(rt_sizes);
246 } else /* just try the next bit */
247 iterator->bitmap_shifter >>= 1;
248
249 /* if we found a valid arg earlier, return it now */
250 if (hit)
251 return 0;
252 }
253
254 /* we don't know how to handle any more args, we're done */
255 return -ENOENT;
256}
257EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
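
For reference, here is a minimal usage sketch of the iterator API added by net/wireless/radiotap.c above, following the kernel-doc notes on ieee80211_radiotap_iterator_init() and ieee80211_radiotap_iterator_next(). It is not part of the commit: the function name example_parse_radiotap(), the buf/buflen parameters, and the printk output are illustrative only, and a real caller would obtain the radiotap header from a received monitor-mode frame. The canonical example remains Documentation/networking/radiotap-headers.txt.

#include <linux/kernel.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

static int example_parse_radiotap(unsigned char *buf, int buflen)
{
	struct ieee80211_radiotap_iterator iterator;
	int ret;

	ret = ieee80211_radiotap_iterator_init(&iterator,
			(struct ieee80211_radiotap_header *)buf, buflen);
	if (ret)
		return ret;	/* not a version-0 header, or shorter than it_len */

	/* visit every argument present in the header, in bitmap order */
	while ((ret = ieee80211_radiotap_iterator_next(&iterator)) == 0) {
		switch (iterator.this_arg_index) {
		case IEEE80211_RADIOTAP_RATE:
			/* one byte, no alignment concerns */
			printk(KERN_DEBUG "rate: %u x 500kbps\n",
			       *iterator.this_arg);
			break;
		case IEEE80211_RADIOTAP_CHANNEL:
			/*
			 * multibyte, little-endian and possibly unaligned:
			 * dereference via get_unaligned() as the kernel-doc
			 * "Alignment Gotcha" requires
			 */
			printk(KERN_DEBUG "freq: %u MHz\n",
			       le16_to_cpu(get_unaligned(
					(__le16 *)iterator.this_arg)));
			break;
		default:
			break;	/* args we do not handle are simply skipped */
		}
	}

	/* -ENOENT just means the argument list ran out */
	return ret == -ENOENT ? 0 : ret;
}

The padding the caller never has to worry about is handled inside ieee80211_radiotap_iterator_next() via the rt_sizes nybble encoding shown in the diff: IEEE80211_RADIOTAP_CHANNEL is 0x24, i.e. 2-byte alignment (upper nybble) and 4 bytes of content (lower nybble), so if the argument pointer sits at an odd offset from the start of the radiotap header, pad = offset & (2 - 1) = 1 and the iterator first advances by 2 - 1 = 1 byte before handing out this_arg.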