author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-16 10:44:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-16 10:44:27 -0400
commit     b4b8f57965e007afbbb0175ea28f733723c5260b (patch)
tree       7f4aeda48ada35771ea3e63bbbb36e52c47aeb62 /net
parent     424b00e2c0f0c38f2cf5331391742ec998f6d89f (diff)
parent     56f367bbfd5a7439961499ca6a2f0822d2074d83 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [TCP]: Add return value indication to tcp_prune_ofo_queue().
  PS3: gelic: fix the oops on the broken IE returned from the hypervisor
  b43legacy: fix DMA mapping leakage
  mac80211: remove message on receiving unexpected unencrypted frames
  Update rt2x00 MAINTAINERS entry
  Add rfkill to MAINTAINERS file
  rfkill: Fix device type check when toggling states
  b43legacy: Fix usage of struct device used for DMAing
  ssb: Fix usage of struct device used for DMAing
  MAINTAINERS: move to generic repository for iwlwifi
  b43legacy: fix initvals loading on bcm4303
  rtl8187: Add missing priv->vif assignments
  netconsole: only set CON_PRINTBUFFER if the user specifies a netconsole
  [CAN]: Update documentation of struct sockaddr_can
  MAINTAINERS: isdn4linux@listserv.isdn4linux.de is subscribers-only
  [TCP]: Fix never pruned tcp out-of-order queue.
  [NET_SCHED] sch_api: fix qdisc_tree_decrease_qlen() loop
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c  | 78
-rw-r--r--  net/mac80211/rx.c     |  7
-rw-r--r--  net/rfkill/rfkill.c   |  2
-rw-r--r--  net/sched/sch_api.c   |  3
4 files changed, 58 insertions, 32 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5119856017ab..bbb7d88a16b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3841,8 +3841,28 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+	    !sk_rmem_schedule(sk, size)) {
+
+		if (tcp_prune_queue(sk) < 0)
+			return -1;
+
+		if (!sk_rmem_schedule(sk, size)) {
+			if (!tcp_prune_ofo_queue(sk))
+				return -1;
+
+			if (!sk_rmem_schedule(sk, size))
+				return -1;
+		}
+	}
+	return 0;
+}
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3892,12 +3912,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     !sk_rmem_schedule(sk, skb->truesize))) {
-			if (tcp_prune_queue(sk) < 0 ||
-			    !sk_rmem_schedule(sk, skb->truesize))
-				goto drop;
-		}
+		    tcp_try_rmem_schedule(sk, skb->truesize))
+			goto drop;
+
 		skb_set_owner_r(skb, sk);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 	}
@@ -3966,12 +3983,8 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, skb->truesize)) {
-		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_rmem_schedule(sk, skb->truesize))
-			goto drop;
-	}
+	if (tcp_try_rmem_schedule(sk, skb->truesize))
+		goto drop;
 
 	/* Disable header prediction. */
 	tp->pred_flags = 0;
@@ -4198,6 +4211,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int res = 0;
+
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		__skb_queue_purge(&tp->out_of_order_queue);
+
+		/* Reset SACK state.  A conforming SACK implementation will
+		 * do the same at a timeout based retransmit.  When a connection
+		 * is in a sad state like this, we care only about integrity
+		 * of the connection not performance.
+		 */
+		if (tp->rx_opt.sack_ok)
+			tcp_sack_reset(&tp->rx_opt);
+		sk_mem_reclaim(sk);
+		res = 1;
+	}
+	return res;
+}
+
 /* Reduce allocated memory if we can, trying to get
  * the socket within its memory limits again.
  *
@@ -4231,20 +4270,7 @@ static int tcp_prune_queue(struct sock *sk)
 	/* Collapsing did not help, destructive actions follow.
 	 * This must not ever occur. */
 
-	/* First, purge the out_of_order queue. */
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
-
-		/* Reset SACK state.  A conforming SACK implementation will
-		 * do the same at a timeout based retransmit.  When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tcp_is_sack(tp))
-			tcp_sack_reset(&tp->rx_opt);
-		sk_mem_reclaim(sk);
-	}
+	tcp_prune_ofo_queue(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 535407d07fa4..a8a40aba846b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1050,12 +1050,9 @@ ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx)
 	if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) &&
 		     (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
 		     (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
-		     (rx->key || rx->sdata->drop_unencrypted))) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: RX non-WEP frame, but expected "
-			       "encryption\n", rx->dev->name);
+		     (rx->key || rx->sdata->drop_unencrypted)))
 		return -EACCES;
-	}
+
 	return 0;
 }
 
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 140a0a8c6b02..4e10a95de832 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -92,7 +92,7 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
 	rfkill_states[type] = state;
 
 	list_for_each_entry(rfkill, &rfkill_list, node) {
-		if (!rfkill->user_claim)
+		if ((!rfkill->user_claim) && (rfkill->type == type))
 			rfkill_toggle_radio(rfkill, state);
 	}
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7e3c048ba9b1..fc8708a0a25e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -386,6 +386,9 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 	if (n == 0)
 		return;
 	while ((parentid = sch->parent)) {
+		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+			return;
+
 		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
 		if (sch == NULL) {
 			WARN_ON(parentid != TC_H_ROOT);