author     Simon Horman <horms@verge.net.au>    2008-08-19 03:36:22 -0400
committer  Simon Horman <horms@verge.net.au>    2008-08-19 03:36:22 -0400
commit     3f087668c4e7c97289f0a67f9278ae6e0a765a80 (patch)
tree       6b278344bf96d31a328bf76e445b189bff5f0ce9 /net
parent     51df1901394a714d1a17202da02ae4957260eab5 (diff)
parent     e5befbd9525d92bb074b70192eb2c69aae65fc60 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'net')
-rw-r--r--   net/bridge/br_device.c                     15
-rw-r--r--   net/core/datagram.c                        87
-rw-r--r--   net/core/dev.c                             47
-rw-r--r--   net/core/skbuff.c                          12
-rw-r--r--   net/dccp/input.c                           12
-rw-r--r--   net/ipv4/netfilter/ipt_addrtype.c           2
-rw-r--r--   net/ipv4/netfilter/nf_nat_proto_common.c    8
-rw-r--r--   net/ipv6/ipv6_sockglue.c                    4
-rw-r--r--   net/mac80211/mlme.c                         2
-rw-r--r--   net/netfilter/nf_conntrack_netlink.c       36
-rw-r--r--   net/rfkill/rfkill.c                        14
-rw-r--r--   net/sched/sch_api.c                        33
-rw-r--r--   net/sched/sch_cbq.c                         2
-rw-r--r--   net/sched/sch_generic.c                    68
-rw-r--r--   net/sched/sch_htb.c                         4
-rw-r--r--   net/sched/sch_prio.c                        4
-rw-r--r--   net/sched/sch_tbf.c                        11
17 files changed, 215 insertions, 146 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 9b58d70b0e7d..4f52c3d50ebe 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -148,11 +148,16 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
 }
 
 static struct ethtool_ops br_ethtool_ops = {
         .get_drvinfo = br_getinfo,
         .get_link = ethtool_op_get_link,
-        .set_sg = br_set_sg,
+        .get_tx_csum = ethtool_op_get_tx_csum,
         .set_tx_csum = br_set_tx_csum,
-        .set_tso = br_set_tso,
+        .get_sg = ethtool_op_get_sg,
+        .set_sg = br_set_sg,
+        .get_tso = ethtool_op_get_tso,
+        .set_tso = br_set_tso,
+        .get_ufo = ethtool_op_get_ufo,
+        .get_flags = ethtool_op_get_flags,
 };
 
 void br_dev_setup(struct net_device *dev)
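
With the get_* hooks above in place, ethtool queries against a bridge now report its offload settings instead of failing. A minimal user-space sketch of such a query; the device name "br0" and the error handling are illustrative assumptions, not part of this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_GTSO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "br0", IFNAMSIZ - 1);   /* assumed bridge name */
        ifr.ifr_data = (char *)&eval;

        /* SIOCETHTOOL lands in br_ethtool_ops; ETHTOOL_GTSO maps to get_tso */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("tcp-segmentation-offload: %s\n",
                       eval.data ? "on" : "off");

        close(fd);
        return 0;
}
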
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad6019..52f577a0f544 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
         return -EFAULT;
 }
 
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying to
+ * @from: io vector to copy to
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+                                 struct iovec *from, int len)
+{
+        int start = skb_headlen(skb);
+        int i, copy = start - offset;
+
+        /* Copy header. */
+        if (copy > 0) {
+                if (copy > len)
+                        copy = len;
+                if (memcpy_fromiovec(skb->data + offset, from, copy))
+                        goto fault;
+                if ((len -= copy) == 0)
+                        return 0;
+                offset += copy;
+        }
+
+        /* Copy paged appendix. Hmm... why does this look so complicated? */
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + skb_shinfo(skb)->frags[i].size;
+                if ((copy = end - offset) > 0) {
+                        int err;
+                        u8 *vaddr;
+                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                        struct page *page = frag->page;
+
+                        if (copy > len)
+                                copy = len;
+                        vaddr = kmap(page);
+                        err = memcpy_fromiovec(vaddr + frag->page_offset +
+                                               offset - start, from, copy);
+                        kunmap(page);
+                        if (err)
+                                goto fault;
+
+                        if (!(len -= copy))
+                                return 0;
+                        offset += copy;
+                }
+                start = end;
+        }
+
+        if (skb_shinfo(skb)->frag_list) {
+                struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+                for (; list; list = list->next) {
+                        int end;
+
+                        WARN_ON(start > offset + len);
+
+                        end = start + list->len;
+                        if ((copy = end - offset) > 0) {
+                                if (copy > len)
+                                        copy = len;
+                                if (skb_copy_datagram_from_iovec(list,
+                                                                 offset - start,
+                                                                 from, copy))
+                                        goto fault;
+                                if ((len -= copy) == 0)
+                                        return 0;
+                                offset += copy;
+                        }
+                        start = end;
+                }
+        }
+        if (!len)
+                return 0;
+
+fault:
+        return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                       u8 __user *to, int len,
                                       __wsum *csump)
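
The new helper is the write-side counterpart of skb_copy_datagram_iovec(): it walks the linear header, the page frags and the frag_list, copying user data into the skb and consuming the iovec as it goes. A hedged sketch of how a caller might use it to fill a freshly allocated skb; the wrapper function and its error handling are illustrative, not taken from this patch:

/* Illustrative only: build an skb of 'len' bytes from a user iovec. */
static struct sk_buff *example_skb_from_iovec(struct iovec *iv, size_t len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (!skb)
                return NULL;

        skb_put(skb, len);      /* make room for the payload first */

        /* Copies 'len' bytes at offset 0; note the iovec is modified. */
        if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
                kfree_skb(skb);
                return NULL;    /* -EFAULT reading user memory */
        }
        return skb;
}
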
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2e..8d133802372b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-                struct softnet_data *sd;
-                unsigned long flags;
+        struct softnet_data *sd;
+        unsigned long flags;
 
-                local_irq_save(flags);
-                sd = &__get_cpu_var(softnet_data);
-                q->next_sched = sd->output_queue;
-                sd->output_queue = q;
-                raise_softirq_irqoff(NET_TX_SOFTIRQ);
-                local_irq_restore(flags);
-        }
+        local_irq_save(flags);
+        sd = &__get_cpu_var(softnet_data);
+        q->next_sched = sd->output_queue;
+        sd->output_queue = q;
+        raise_softirq_irqoff(NET_TX_SOFTIRQ);
+        local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+                __netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1800,9 +1804,13 @@ gso:
 
         spin_lock(root_lock);
 
-        rc = qdisc_enqueue_root(skb, q);
-        qdisc_run(q);
-
+        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+                kfree_skb(skb);
+                rc = NET_XMIT_DROP;
+        } else {
+                rc = qdisc_enqueue_root(skb, q);
+                qdisc_run(q);
+        }
         spin_unlock(root_lock);
 
         goto out;
@@ -1974,15 +1982,15 @@ static void net_tx_action(struct softirq_action *h)
 
                         head = head->next_sched;
 
-                        smp_mb__before_clear_bit();
-                        clear_bit(__QDISC_STATE_SCHED, &q->state);
-
                         root_lock = qdisc_lock(q);
                         if (spin_trylock(root_lock)) {
+                                smp_mb__before_clear_bit();
+                                clear_bit(__QDISC_STATE_SCHED,
+                                          &q->state);
                                 qdisc_run(q);
                                 spin_unlock(root_lock);
                         } else {
-                                __netif_schedule(q);
+                                __netif_reschedule(q);
                         }
                 }
         }
@@ -2084,7 +2092,8 @@ static int ing_filter(struct sk_buff *skb)
         q = rxq->qdisc;
         if (q != &noop_qdisc) {
                 spin_lock(qdisc_lock(q));
-                result = qdisc_enqueue_root(skb, q);
+                if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+                        result = qdisc_enqueue_root(skb, q);
                 spin_unlock(qdisc_lock(q));
         }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65d..ca1ccdf1ef76 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                 segs = nskb;
                 tail = nskb;
 
-                nskb->dev = skb->dev;
-                skb_copy_queue_mapping(nskb, skb);
-                nskb->priority = skb->priority;
-                nskb->protocol = skb->protocol;
-                nskb->vlan_tci = skb->vlan_tci;
-                nskb->dst = dst_clone(skb->dst);
-                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-                nskb->pkt_type = skb->pkt_type;
+                __copy_skb_header(nskb, skb);
                 nskb->mac_len = skb->mac_len;
 
                 skb_reserve(nskb, headroom);
@@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                 skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
                                           doffset);
                 if (!sg) {
+                        nskb->ip_summed = CHECKSUM_NONE;
                         nskb->csum = skb_copy_and_csum_bits(skb, offset,
                                                             skb_put(nskb, len),
                                                             len, 0);
@@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                 frag = skb_shinfo(nskb)->frags;
                 k = 0;
 
-                nskb->ip_summed = CHECKSUM_PARTIAL;
-                nskb->csum = skb->csum;
                 skb_copy_from_linear_data_offset(skb, offset,
                                                  skb_put(nskb, hsize), hsize);
 
diff --git a/net/dccp/input.c b/net/dccp/input.c
index df2f110df94a..803933ab396d 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -411,12 +411,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                 struct dccp_sock *dp = dccp_sk(sk);
                 long tstamp = dccp_timestamp();
 
-                /* Stop the REQUEST timer */
-                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
-                WARN_ON(sk->sk_send_head == NULL);
-                __kfree_skb(sk->sk_send_head);
-                sk->sk_send_head = NULL;
-
                 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                                dp->dccps_awl, dp->dccps_awh)) {
                         dccp_pr_debug("invalid ackno: S.AWL=%llu, "
@@ -441,6 +435,12 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                                     DCCP_ACKVEC_STATE_RECEIVED))
                         goto out_invalid_packet; /* FIXME: change error code */
 
+                /* Stop the REQUEST timer */
+                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
+                WARN_ON(sk->sk_send_head == NULL);
+                kfree_skb(sk->sk_send_head);
+                sk->sk_send_head = NULL;
+
                 dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
                 dccp_update_gsr(sk, dp->dccps_isr);
                 /*
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 49587a497229..462a22c97877 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in,
                        (info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
         if (ret && info->dest)
                 ret &= match_type(dev, iph->daddr, info->dest) ^
-                       (info->flags & IPT_ADDRTYPE_INVERT_DEST);
+                       !!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
         return ret;
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 91537f11273f..6c4f11f51446 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -73,9 +73,13 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                 range_size = ntohs(range->max.all) - min + 1;
         }
 
-        off = *rover;
         if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
-                off = net_random();
+                off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
+                                                 maniptype == IP_NAT_MANIP_SRC
+                                                         ? tuple->dst.u.all
+                                                         : tuple->src.u.all);
+        else
+                off = *rover;
 
         for (i = 0; i < range_size; i++, off++) {
                 *portptr = htons(min + off % range_size);
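
Behaviourally, only the choice of starting offset changes: with IP_NAT_RANGE_PROTO_RANDOM the search now begins at a flow-keyed hash (secure_ipv4_port_ephemeral()) rather than at net_random(), so the chosen port is harder for an off-path attacker to guess while staying stable per flow; without the flag it still starts from the shared *rover. The wrap-around walk over the range is unchanged. A standalone sketch of that walk, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Try each of 'range_size' ports starting at 'min', beginning at an
 * arbitrary offset and wrapping via the modulo, until one is accepted.
 * Returns 0 if the whole range is exhausted. */
static uint16_t pick_port(uint16_t min, unsigned int range_size,
                          unsigned int off, int (*is_free)(uint16_t))
{
        unsigned int i;

        for (i = 0; i < range_size; i++, off++) {
                uint16_t port = min + off % range_size;

                if (is_free(port))
                        return port;
        }
        return 0;
}

static int not_1025(uint16_t port) { return port != 1025; }

int main(void)
{
        /* Range 1024..1027, starting offset 1: 1025 is refused, 1026 wins. */
        printf("%u\n", pick_port(1024, 4, 1, not_1025));
        return 0;
}
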
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 741cfcd96f88..4e5eac301f91 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -911,7 +911,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
         } else {
                 if (np->rxopt.bits.rxinfo) {
                         struct in6_pktinfo src_info;
-                        src_info.ipi6_ifindex = np->mcast_oif;
+                        src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
                         ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
                         put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                 }
@@ -921,7 +921,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                 }
                 if (np->rxopt.bits.rxoinfo) {
                         struct in6_pktinfo src_info;
-                        src_info.ipi6_ifindex = np->mcast_oif;
+                        src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
                         ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
                         put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e1d11c9b6729..1e97fb9fb34b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2103,6 +2103,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                         rcu_read_unlock();
                         return;
                 }
+                /* update new sta with its last rx activity */
+                sta->last_rx = jiffies;
         }
 
         /*
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 105a616c5c78..a8752031adcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -968,7 +968,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
                         /* need to zero data of old helper */
                         memset(&help->help, 0, sizeof(help->help));
                 } else {
-                        help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                         if (help == NULL)
                                 return -ENOMEM;
                 }
@@ -1136,16 +1136,33 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
         ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
         ct->status |= IPS_CONFIRMED;
 
+        rcu_read_lock();
+        helper = __nf_ct_helper_find(rtuple);
+        if (helper) {
+                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+                if (help == NULL) {
+                        rcu_read_unlock();
+                        err = -ENOMEM;
+                        goto err;
+                }
+                /* not in hash table yet so not strictly necessary */
+                rcu_assign_pointer(help->helper, helper);
+        }
+
         if (cda[CTA_STATUS]) {
                 err = ctnetlink_change_status(ct, cda);
-                if (err < 0)
+                if (err < 0) {
+                        rcu_read_unlock();
                         goto err;
+                }
         }
 
         if (cda[CTA_PROTOINFO]) {
                 err = ctnetlink_change_protoinfo(ct, cda);
-                if (err < 0)
+                if (err < 0) {
+                        rcu_read_unlock();
                         goto err;
+                }
         }
 
         nf_ct_acct_ext_add(ct, GFP_KERNEL);
@@ -1155,19 +1172,6 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
                 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
 #endif
 
-        rcu_read_lock();
-        helper = __nf_ct_helper_find(rtuple);
-        if (helper) {
-                help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
-                if (help == NULL) {
-                        rcu_read_unlock();
-                        err = -ENOMEM;
-                        goto err;
-                }
-                /* not in hash table yet so not strictly necessary */
-                rcu_assign_pointer(help->helper, helper);
-        }
-
         /* setup master conntrack: this is a confirmed expectation */
         if (master_ct) {
                 __set_bit(IPS_EXPECTED_BIT, &ct->status);
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index d2d45655cd1a..35a9994e2339 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -150,6 +150,8 @@ static void update_rfkill_state(struct rfkill *rfkill)
  * calls and handling all the red tape such as issuing notifications
  * if the call is successful.
  *
+ * Suspended devices are not touched at all, and -EAGAIN is returned.
+ *
  * Note that the @force parameter cannot override a (possibly cached)
  * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
  * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
@@ -168,6 +170,9 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
         int retval = 0;
         enum rfkill_state oldstate, newstate;
 
+        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+                return -EBUSY;
+
         oldstate = rfkill->state;
 
         if (rfkill->get_state && !force &&
@@ -214,7 +219,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
  *
  * This function toggles the state of all switches of given type,
  * unless a specific switch is claimed by userspace (in which case,
- * that switch is left alone).
+ * that switch is left alone) or suspended.
  */
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
 {
@@ -239,8 +244,8 @@ EXPORT_SYMBOL(rfkill_switch_all);
 /**
  * rfkill_epo - emergency power off all transmitters
  *
- * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
- * everything in its path but rfkill_mutex and rfkill->mutex.
+ * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
+ * ignoring everything in its path but rfkill_mutex and rfkill->mutex.
  */
 void rfkill_epo(void)
 {
@@ -458,13 +463,14 @@ static int rfkill_resume(struct device *dev)
         if (dev->power.power_state.event != PM_EVENT_ON) {
                 mutex_lock(&rfkill->mutex);
 
+                dev->power.power_state.event = PM_EVENT_ON;
+
                 /* restore radio state AND notify everybody */
                 rfkill_toggle_radio(rfkill, rfkill->state, 1);
 
                 mutex_unlock(&rfkill->mutex);
         }
 
-        dev->power.power_state = PMSG_ON;
         return 0;
 }
 #else
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c25465e5607a..9372ec41ce84 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,6 +27,7 @@
 #include <linux/kmod.h>
 #include <linux/list.h>
 #include <linux/hrtimer.h>
+#include <linux/lockdep.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -426,7 +427,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
         wd->qdisc->flags &= ~TCQ_F_THROTTLED;
         smp_wmb();
-        __netif_schedule(wd->qdisc);
+        __netif_schedule(qdisc_root(wd->qdisc));
 
         return HRTIMER_NORESTART;
 }
@@ -637,11 +638,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid
         if (new || old)
                 qdisc_notify(skb, n, clid, old, new);
 
-        if (old) {
-                spin_lock_bh(&old->q.lock);
+        if (old)
                 qdisc_destroy(old);
-                spin_unlock_bh(&old->q.lock);
-        }
 }
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -707,6 +705,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
         return err;
 }
 
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
 /*
    Allocate and initialize new qdisc.
 
@@ -767,6 +769,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
         if (handle == TC_H_INGRESS) {
                 sch->flags |= TCQ_F_INGRESS;
                 handle = TC_H_MAKE(TC_H_INGRESS, 0);
+                lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
         } else {
                 if (handle == 0) {
                         handle = qdisc_alloc_handle(dev);
@@ -774,6 +777,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                         if (handle == 0)
                                 goto err_out3;
                 }
+                lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
         }
 
         sch->handle = handle;
@@ -1084,20 +1088,13 @@ create_n_graft:
         }
 
 graft:
-        if (1) {
-                spinlock_t *root_lock;
-
-                err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
-                if (err) {
-                        if (q) {
-                                root_lock = qdisc_root_lock(q);
-                                spin_lock_bh(root_lock);
-                                qdisc_destroy(q);
-                                spin_unlock_bh(root_lock);
-                        }
-                        return err;
-                }
+        err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+        if (err) {
+                if (q)
+                        qdisc_destroy(q);
+                return err;
         }
+
         return 0;
 }
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4e261ce62f48..47ef492c4ff4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -654,7 +654,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
         }
 
         sch->flags &= ~TCQ_F_THROTTLED;
-        __netif_schedule(sch);
+        __netif_schedule(qdisc_root(sch));
         return HRTIMER_NORESTART;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 468574682caa..c3ed4d44fc14 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -518,14 +518,17 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
 {
-        struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
         const struct Qdisc_ops *ops = qdisc->ops;
 
+        if (qdisc->flags & TCQ_F_BUILTIN ||
+            !atomic_dec_and_test(&qdisc->refcnt))
+                return;
+
+        if (qdisc->parent)
+                list_del(&qdisc->list);
+
 #ifdef CONFIG_NET_SCHED
         qdisc_put_stab(qdisc->stab);
 #endif
@@ -542,20 +545,6 @@ static void __qdisc_destroy(struct rcu_head *head)
 
         kfree((char *) qdisc - qdisc->padded);
 }
-
-/* Under qdisc_lock(qdisc) and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
-{
-        if (qdisc->flags & TCQ_F_BUILTIN ||
-            !atomic_dec_and_test(&qdisc->refcnt))
-                return;
-
-        if (qdisc->parent)
-                list_del(&qdisc->list);
-
-        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-}
 EXPORT_SYMBOL(qdisc_destroy);
 
 static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
@@ -597,6 +586,9 @@ static void transition_one_qdisc(struct net_device *dev,
         struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
         int *need_watchdog_p = _need_watchdog;
 
+        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
         rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
         if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
                 *need_watchdog_p = 1;
@@ -640,6 +632,9 @@ static void dev_deactivate_queue(struct net_device *dev,
         if (qdisc) {
                 spin_lock_bh(qdisc_lock(qdisc));
 
+                if (!(qdisc->flags & TCQ_F_BUILTIN))
+                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
                 dev_queue->qdisc = qdisc_default;
                 qdisc_reset(qdisc);
 
@@ -647,7 +642,7 @@ static void dev_deactivate_queue(struct net_device *dev,
         }
 }
 
-static bool some_qdisc_is_busy(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
 {
         unsigned int i;
 
@@ -661,14 +656,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
                 q = dev_queue->qdisc_sleeping;
                 root_lock = qdisc_lock(q);
 
-                if (lock)
-                        spin_lock_bh(root_lock);
+                spin_lock_bh(root_lock);
 
                 val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
                        test_bit(__QDISC_STATE_SCHED, &q->state));
 
-                if (lock)
-                        spin_unlock_bh(root_lock);
+                spin_unlock_bh(root_lock);
 
                 if (val)
                         return true;
@@ -678,8 +671,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 
 void dev_deactivate(struct net_device *dev)
 {
-        bool running;
-
         netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
         dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
@@ -689,25 +680,8 @@ void dev_deactivate(struct net_device *dev)
         synchronize_rcu();
 
         /* Wait for outstanding qdisc_run calls. */
-        do {
-                while (some_qdisc_is_busy(dev, 0))
-                        yield();
-
-                /*
-                 * Double-check inside queue lock to ensure that all effects
-                 * of the queue run are visible when we return.
-                 */
-                running = some_qdisc_is_busy(dev, 1);
-
-                /*
-                 * The running flag should never be set at this point because
-                 * we've already set dev->qdisc to noop_qdisc *inside* the same
-                 * pair of spin locks. That is, if any qdisc_run starts after
-                 * our initial test it should see the noop_qdisc and then
-                 * clear the RUNNING bit before dropping the queue lock. So
-                 * if it is set here then we've found a bug.
-                 */
-        } while (WARN_ON_ONCE(running));
+        while (some_qdisc_is_busy(dev))
+                yield();
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
@@ -736,14 +710,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
         struct Qdisc *qdisc_default = _qdisc_default;
 
         if (qdisc) {
-                spinlock_t *root_lock = qdisc_lock(qdisc);
-
                 dev_queue->qdisc = qdisc_default;
                 dev_queue->qdisc_sleeping = qdisc_default;
 
-                spin_lock_bh(root_lock);
                 qdisc_destroy(qdisc);
-                spin_unlock_bh(root_lock);
         }
 }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6febd245e62b..0df0df202ed0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         sch->qstats.drops++;
                         cl->qstats.drops++;
                 }
-                return NET_XMIT_DROP;
+                return ret;
         } else {
                 cl->bstats.packets +=
                         skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
@@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                         sch->qstats.drops++;
                         cl->qstats.drops++;
                 }
-                return NET_XMIT_DROP;
+                return ret;
         } else
                 htb_activate(q, cl);
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index eac197610edf..a6697c686c7f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -113,11 +113,11 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
         if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
                 sch->q.qlen++;
                 sch->qstats.requeues++;
-                return 0;
+                return NET_XMIT_SUCCESS;
         }
         if (net_xmit_drop_count(ret))
                 sch->qstats.drops++;
-        return NET_XMIT_DROP;
+        return ret;
 }
 
 
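
Both the prio and htb hunks stop collapsing every child failure into NET_XMIT_DROP: the child's own verdict is returned, so NET_XMIT_CN (congestion notification, where the packet was not actually lost) is no longer reported or counted as a drop. A hedged sketch of the resulting caller-side pattern; the wrapper function is illustrative, only the helpers that appear in the hunks are real:

/* Illustrative parent-qdisc accounting once the child's verdict is kept. */
static int example_enqueue_child(struct sk_buff *skb, struct Qdisc *sch,
                                 struct Qdisc *child)
{
        int ret = qdisc_enqueue(skb, child);

        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }

        /* NET_XMIT_CN is congestion feedback, not a lost packet. */
        if (net_xmit_drop_count(ret))
                sch->qstats.drops++;

        return ret;     /* propagate the original verdict upward */
}
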
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7d3b7ff3bf07..94c61598b86a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         struct tbf_sched_data *q = qdisc_priv(sch);
         int ret;
 
-        if (qdisc_pkt_len(skb) > q->max_size) {
-                sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
-                if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
-#endif
-                        kfree_skb(skb);
-
-                return NET_XMIT_DROP;
-        }
+        if (qdisc_pkt_len(skb) > q->max_size)
+                return qdisc_reshape_fail(skb, sch);
 
         ret = qdisc_enqueue(skb, q->qdisc);
         if (ret != 0) {
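
The open-coded oversize handling is replaced by a call to qdisc_reshape_fail(), which covers the same ground as the block removed above: account a drop, give a reshape_fail hook (CONFIG_NET_CLS_ACT) a chance to take the packet back, and otherwise free it and return NET_XMIT_DROP. Roughly, as a sketch reconstructed from the logic being deleted here rather than copied from the tree:

/* Sketch of the helper's behaviour, inferred from the removed block above. */
static inline int qdisc_reshape_fail_sketch(struct sk_buff *skb,
                                            struct Qdisc *sch)
{
#ifdef CONFIG_NET_CLS_ACT
        /* A configured reshape hook may reclaim the packet instead. */
        if (sch->reshape_fail && sch->reshape_fail(skb, sch) == 0)
                return NET_XMIT_SUCCESS;
#endif
        sch->qstats.drops++;
        kfree_skb(skb);
        return NET_XMIT_DROP;
}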