author		David S. Miller <davem@davemloft.net>	2011-01-24 17:09:35 -0500
committer	David S. Miller <davem@davemloft.net>	2011-01-24 17:09:35 -0500
commit		5bdc22a56549e7983c6b443298672641952ea035 (patch)
tree		fbfd4e7453e4fd23dfef826d4610ed2aae47b36c /net
parent		b6f4098897f30b7ea90a1c1edf35e9b20a9d828a (diff)
parent		e92427b289d252cfbd4cb5282d92f4ce1a5bb1fb (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/sched/sch_hfsc.c
	net/sched/sch_htb.c
	net/sched/sch_tbf.c
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c				|  5
-rw-r--r--	net/core/rtnetlink.c			|  2
-rw-r--r--	net/dsa/dsa.c				|  2
-rw-r--r--	net/ipv4/arp.c				| 11
-rw-r--r--	net/ipv4/inet_diag.c			|  2
-rw-r--r--	net/mac80211/Kconfig			|  6
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	|  4
-rw-r--r--	net/netlink/genetlink.c			|  2
-rw-r--r--	net/rfkill/Kconfig			|  4
-rw-r--r--	net/sched/sch_cbq.c			|  3
-rw-r--r--	net/sched/sch_drr.c			|  2
-rw-r--r--	net/sched/sch_dsmark.c			|  2
-rw-r--r--	net/sched/sch_fifo.c			|  5
-rw-r--r--	net/sched/sch_hfsc.c			|  2
-rw-r--r--	net/sched/sch_htb.c			| 12
-rw-r--r--	net/sched/sch_multiq.c			|  2
-rw-r--r--	net/sched/sch_netem.c			|  3
-rw-r--r--	net/sched/sch_prio.c			|  2
-rw-r--r--	net/sched/sch_red.c			| 11
-rw-r--r--	net/sched/sch_sfq.c			|  5
-rw-r--r--	net/sched/sch_tbf.c			|  2
-rw-r--r--	net/sched/sch_teql.c			|  3
-rw-r--r--	net/sctp/socket.c			|  4
-rw-r--r--	net/wireless/Kconfig			|  2
-rw-r--r--	net/xfrm/xfrm_user.c			|  2
25 files changed, 47 insertions(+), 53 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 47d3d78d5416..d162ba8d622d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@ EXPORT_SYMBOL(dev_get_by_index);
  * @ha: hardware address
  *
  * Search for an interface by MAC address. Returns NULL if the device
- * is not found or a pointer to the device. The caller must hold RCU
+ * is not found or a pointer to the device.
+ * The caller must hold RCU or RTNL.
  * The returned device has not had its ref count increased
  * and the caller must therefore be careful about locking
  *
@@ -2046,7 +2047,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
 {
-	if (!can_checksum_protocol(protocol, features)) {
+	if (!can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
 		features &= ~NETIF_F_SG;
 	} else if (illegal_highdma(skb->dev, skb)) {
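[Illustrative aside, not part of the diff.] The kernel-doc change above relaxes the documented locking rule for the MAC-address lookup: the caller may hold either RCU or RTNL, and the returned device is not reference counted. A minimal caller sketch of the RCU variant follows; it assumes the helper documented here is dev_getbyhwaddr_rcu() (the function name is not visible in the hunk), and example_find_by_mac() is a hypothetical wrapper.

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Sketch only: resolve an ifindex from a MAC address under RCU.
 * The pointer returned by the lookup is not refcounted, so it must not
 * be used after rcu_read_unlock() unless dev_hold() was taken first.
 */
static int example_find_by_mac(struct net *net, const char *ha)
{
	struct net_device *dev;
	int ifindex = -ENODEV;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, ha);
	if (dev)
		ifindex = dev->ifindex;
	rcu_read_unlock();

	return ifindex;
}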
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 310eb804e092..c668f8c371b2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1853,7 +1853,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 
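[Illustrative aside, not part of the diff.] This hunk, and the matching ones in inet_diag.c, nf_conntrack_netlink.c, genetlink.c and xfrm_user.c below, differ only in how NLM_F_DUMP is tested. NLM_F_DUMP is a composite flag, defined as NLM_F_ROOT | NLM_F_MATCH, so a plain bitwise test is true when either bit is set, while comparing the masked value against NLM_F_DUMP requires both bits. A small userspace-style sketch of the difference, with hypothetical helper names:

#include <linux/netlink.h>	/* NLM_F_DUMP == (NLM_F_ROOT | NLM_F_MATCH) */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helpers showing the two tests used in these hunks. */
static bool dump_requested_loose(__u16 flags)
{
	return flags & NLM_F_DUMP;			/* any dump bit present */
}

static bool dump_requested_strict(__u16 flags)
{
	return (flags & NLM_F_DUMP) == NLM_F_DUMP;	/* both bits required */
}

int main(void)
{
	__u16 flags = NLM_F_REQUEST | NLM_F_ROOT;	/* only one of the two bits */

	printf("loose=%d strict=%d\n",
	       dump_requested_loose(flags), dump_requested_strict(flags));
	return 0;
}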
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a74e1f4..3fb14b7c13cf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
 }
 module_exit(dsa_cleanup_module);
 
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
 MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:dsa");
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 04c8b69fd426..7927589813b5 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
 		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
 		return 0;
 	}
-	if (__in_dev_get_rcu(dev)) {
-		IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+	if (__in_dev_get_rtnl(dev)) {
+		IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
 		return 0;
 	}
 	return -ENXIO;
 }
 
-/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
 			      struct net_device *dev)
 {
@@ -1233,10 +1232,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	if (!(r.arp_flags & ATF_NETMASK))
 		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
 							   htonl(0xFFFFFFFFUL);
-	rcu_read_lock();
+	rtnl_lock();
 	if (r.arp_dev[0]) {
 		err = -ENODEV;
-		dev = dev_get_by_name_rcu(net, r.arp_dev);
+		dev = __dev_get_by_name(net, r.arp_dev);
 		if (dev == NULL)
 			goto out;
 
@@ -1263,7 +1262,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		break;
 	}
 out:
-	rcu_read_unlock();
+	rtnl_unlock();
 	if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
 		err = -EFAULT;
 	return err;
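[Illustrative aside, not part of the diff.] The three arp.c hunks convert arp_ioctl() from RCU to RTNL protection: the device is now looked up with __dev_get_by_name(), which must be called with RTNL held and returns a pointer without taking a reference. A minimal sketch of that pattern, with a hypothetical example_get_mtu() caller:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch only: look up a device by name under RTNL and read a field.
 * __dev_get_by_name() takes no reference, so the pointer is valid only
 * while rtnl_lock() is held.
 */
static int example_get_mtu(struct net *net, const char *name)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev)
		mtu = dev->mtu;
	rtnl_unlock();

	return mtu;
}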
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2746c1fa6417..2ada17129fce 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 9109262abd24..c766056d0488 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@ config MAC80211_HAS_RC
 	def_bool n
 
 config MAC80211_RC_PID
-	bool "PID controller based rate control algorithm" if EMBEDDED
+	bool "PID controller based rate control algorithm" if EXPERT
 	select MAC80211_HAS_RC
 	---help---
 	  This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@ config MAC80211_RC_PID
 	  rate.
 
 config MAC80211_RC_MINSTREL
-	bool "Minstrel" if EMBEDDED
+	bool "Minstrel" if EXPERT
 	select MAC80211_HAS_RC
 	default y
 	---help---
 	  This option enables the 'minstrel' TX rate control algorithm
 
 config MAC80211_RC_MINSTREL_HT
-	bool "Minstrel 802.11n support" if EMBEDDED
+	bool "Minstrel 802.11n support" if EXPERT
 	depends on MAC80211_RC_MINSTREL
 	default y
 	---help---
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 61c73945bb94..3fec12c570a8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -967,7 +967,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	u16 zone;
 	int err;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
+	if (nlh->nlmsg_flags & NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 					  ctnetlink_done);
 
@@ -1832,7 +1832,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 	u16 zone;
 	int err;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
 					  ctnetlink_exp_done);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f83cb370292b..1781d99145e2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf765876458..7fce6dfd2180 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
 	default y
 
 config RFKILL_INPUT
-	bool "RF switch input support" if EMBEDDED
+	bool "RF switch input support" if EXPERT
 	depends on RFKILL
 	depends on INPUT = y || RFKILL = INPUT
-	default y if !EMBEDDED
+	default y if !EXPERT
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 25ed522b2891..24d94c097b35 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -391,7 +391,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,7 +649,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		qdisc_bstats_update(sch, skb);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
@@ -973,6 +971,7 @@ cbq_dequeue(struct Qdisc *sch)
 
 	skb = cbq_dequeue_1(sch);
 	if (skb) {
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 		qdisc_unthrottled(sch);
 		return skb;
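[Illustrative aside, not part of the diff.] This hunk and the remaining sch_*.c hunks below all make the same change: qdisc_bstats_update() moves from the enqueue path to the dequeue path, so the qdisc's byte/packet counters reflect traffic actually handed to the lower layer rather than traffic merely queued (which may still be dropped). A stripped-down sketch of the resulting shape, for a hypothetical qdisc with backlog accounting and error handling omitted:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Enqueue no longer touches sch->bstats. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);		/* sch->q.qlen is updated here */
	return NET_XMIT_SUCCESS;
}

/* Stats are accounted when a packet actually leaves the qdisc. */
static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}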
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e642eafc..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	bstats_update(&cl->bstats, skb);
-	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		skb = qdisc_dequeue_peeked(cl->qdisc);
 		if (cl->qdisc->q.qlen == 0)
 			list_del(&cl->alist);
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 		return skb;
 	}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 4970d56b4aa7..2c790204d042 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 	if (skb == NULL)
 		return NULL;
 
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index f7290d2542c2..be33f9ddf9dd 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -45,17 +45,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct sk_buff *skb_head;
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (likely(skb_queue_len(&sch->q) < q->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	/* queue full, remove one skb to fulfill the limit */
-	skb_head = qdisc_dequeue_head(sch);
+	__qdisc_queue_drop_head(sch, &sch->q);
 	sch->qstats.drops++;
-	kfree_skb(skb_head);
-
 	qdisc_enqueue_tail(skb, sch);
 
 	return NET_XMIT_CN;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b632d9251913..6488e6425652 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1598,7 +1598,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	set_active(cl, qdisc_pkt_len(skb));
 
 	bstats_update(&cl->bstats, skb);
-	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1665,6 +1664,7 @@ hfsc_dequeue(struct Qdisc *sch)
 	}
 
 	qdisc_unthrottled(sch);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 39db75cd8c17..e1429a85091f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,7 +581,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	sch->q.qlen++;
-	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -856,7 +855,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
@@ -865,6 +864,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
 	if (skb != NULL) {
+ok:
+		qdisc_bstats_update(sch, skb);
 		qdisc_unthrottled(sch);
 		sch->q.qlen--;
 		return skb;
@@ -899,11 +900,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 
 			m |= 1 << prio;
 			skb = htb_dequeue_tree(q, prio, level);
-			if (likely(skb != NULL)) {
-				sch->q.qlen--;
-				qdisc_unthrottled(sch);
-				goto fin;
-			}
+			if (likely(skb != NULL))
+				goto ok;
 		}
 	}
 	sch->qstats.overlimits++;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 820f2a7ca14d..edc1950e0e77 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 		qdisc = q->queues[q->curband];
 		skb = qdisc->dequeue(qdisc);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c26ef3614f7e..64f0d3293b49 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		skb->tstamp.tv64 = 0;
 #endif
 		pr_debug("netem_dequeue: return skb=%p\n", skb);
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 		return skb;
 	}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		qdisc_bstats_update(sch, nskb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3bea31e101b5..2a318f2dc3e5 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -83,7 +83,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -115,6 +114,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc->dequeue(qdisc);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 689157555fa4..6649463da1b6 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -93,7 +93,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -113,11 +112,13 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 	struct Qdisc *child = q->qdisc;
 
 	skb = child->dequeue(child);
-	if (skb)
+	if (skb) {
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
-	else if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
-
+	} else {
+		if (!red_is_idling(&q->parms))
+			red_start_of_idle_period(&q->parms);
+	}
 	return skb;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index fdba52aa053d..4cff44235773 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->tail = slot;
 		slot->allot = q->scaled_quantum;
 	}
-	if (++sch->q.qlen <= q->limit) {
-		qdisc_bstats_update(sch, skb);
+	if (++sch->q.qlen <= q->limit)
 		return NET_XMIT_SUCCESS;
-	}
 
 	sfq_drop(sch);
 	return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
 	}
 	skb = slot_dequeue_head(slot);
 	sfq_dec(q, a);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 	sch->qstats.backlog -= qdisc_pkt_len(skb);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 86c016696977..1dcfb5223a86 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -133,7 +133,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	sch->q.qlen++;
-	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -186,6 +185,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 			q->ptokens = ptoks;
 			sch->q.qlen--;
 			qdisc_unthrottled(sch);
+			qdisc_bstats_update(sch, skb);
 			return skb;
 		}
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 64c071ded0f4..45cd30098e34 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -109,6 +108,8 @@ teql_dequeue(struct Qdisc *sch)
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
+	} else {
+		qdisc_bstats_update(sch, skb);
 	}
 	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd25f50..8e02550ff3e8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3428,7 +3428,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
 		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
 		break;
 
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
 		break;
 	case SCTP_PARTIAL_DELIVERY_POINT:
@@ -5333,7 +5333,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
 							  optlen);
 		break;
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
 						     optlen);
 		break;
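[Illustrative aside, not part of the diff.] The two sctp hunks only rename the case label from SCTP_DELAYED_ACK to SCTP_DELAYED_SACK; the handler functions are unchanged. From userspace the option is usually driven with struct sctp_sack_info as described in the SCTP sockets API (RFC 6458); a hedged usage sketch, assuming an already-created one-to-one SCTP socket fd:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch only: request delayed SACK with a 200 ms delay. */
static int set_delayed_sack(int fd)
{
	struct sctp_sack_info sack;

	memset(&sack, 0, sizeof(sack));
	sack.sack_assoc_id = 0;		/* this socket's association(s) */
	sack.sack_delay = 200;		/* milliseconds */
	sack.sack_freq = 2;		/* SACK every second packet */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
		       &sack, sizeof(sack)) < 0) {
		perror("setsockopt(SCTP_DELAYED_SACK)");
		return -1;
	}
	return 0;
}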
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee29063e5d..1f1ef70f34f2 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@ config CFG80211_DEBUGFS
 	  If unsure, say N.
 
 config CFG80211_INTERNAL_REGDB
-	bool "use statically compiled regulatory rules database" if EMBEDDED
+	bool "use statically compiled regulatory rules database" if EXPERT
 	default n
 	depends on CFG80211
 	---help---
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5e1e0b08890..61291965c5f6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2189,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
 		if (link->dump == NULL)
 			return -EINVAL;
 