author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-02 13:34:49 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-02 13:34:49 -0400
commit	2b3b29080d702e5488f214276170ab46adc40ee5 (patch)
tree	53d719e8490fda0c24d3f503078d36400c813be9
parent	e80eaf9904d5b19512265e1435372b2e12146a5f (diff)
parent	04045f98e0457aba7d4e6736f37eed189c48a5f7 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[IEEE80211]: avoid integer underflow for runt rx frames
[TCP]: secure_tcp_sequence_number() should not use a too fast clock
[SFQ]: Remove artificial limitation for queue limit.
-rw-r--r--	drivers/char/random.c	10
-rw-r--r--	net/ieee80211/ieee80211_rx.c	6
-rw-r--r--	net/sched/sch_sfq.c	47
3 files changed, 43 insertions, 20 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 397c714cf2ba..af274e5a25ee 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	 * As close as possible to RFC 793, which
 	 * suggests using a 250 kHz clock.
 	 * Further reading shows this assumes 2 Mb/s networks.
-	 * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
-	 * That's funny, Linux has one built in! Use it!
-	 * (Networks are faster now - should this be increased?)
+	 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+	 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+	 * we also need to limit the resolution so that the u32 seq
+	 * overlaps less than one time per MSL (2 minutes).
+	 * Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
-	seq += ktime_get_real().tv64;
+	seq += ktime_get_real().tv64 >> 6;
 #if 0
 	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
 	       saddr, daddr, sport, dport, seq);
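A quick check of the numbers in the new comment: shifting the nanosecond-resolution ktime right by 6 bits gives a 64 ns tick, so the 32-bit sequence offset only wraps after 2^32 * 64 ns, roughly 274.9 s, comfortably longer than the 2-minute MSL. A minimal standalone sketch of that arithmetic (not kernel code, just the calculation):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* ktime_get_real().tv64 >> 6 gives a tick of 2^6 = 64 ns */
	const uint64_t tick_ns = 1ULL << 6;
	const uint64_t msl_s = 120;			/* MSL: 2 minutes */

	/* time until the u32 sequence offset wraps at one increment per tick */
	double wrap_s = (double)tick_ns * 4294967296.0 / 1e9;

	printf("wrap period %.1f s, MSL %llu s\n", wrap_s,
	       (unsigned long long)msl_s);		/* ~274.9 s vs 120 s */
	return 0;
}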
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f2de2e48b021..6284c99b456e 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 	frag = WLAN_GET_SEQ_FRAG(sc);
 	hdrlen = ieee80211_get_hdrlen(fc);
 
+	if (skb->len < hdrlen) {
+		printk(KERN_INFO "%s: invalid SKB length %d\n",
+		       dev->name, skb->len);
+		goto rx_dropped;
+	}
+
 	/* Put this code here so that we avoid duplicating it in all
 	 * Rx paths. - Jean II */
 #ifdef CONFIG_WIRELESS_EXT
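For context on the "integer underflow" in the commit title: skb->len and the 802.11 header length are unsigned, so a later computation along the lines of skb->len - hdrlen on a runt frame wraps to an enormous value instead of going negative. A hedged userspace illustration of that wraparound (the values are made up; 24 bytes is merely a typical 802.11 data-header size):

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 10;	/* runt frame shorter than its header */
	unsigned int hdrlen  = 24;	/* illustrative 802.11 header length */

	/* unsigned subtraction wraps instead of going negative */
	unsigned int payload = skb_len - hdrlen;

	printf("apparent payload length: %u bytes\n", payload);	/* ~4.29e9 */
	return 0;
}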
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3a23e30bc79e..b542c875e154 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/jhash.h>
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -95,7 +96,7 @@ struct sfq_sched_data
 
 	/* Variables */
 	struct timer_list perturb_timer;
-	int perturbation;
+	u32 perturbation;
 	sfq_index tail;		/* Index of current slot in round */
 	sfq_index max_depth;	/* Maximal depth */
 
@@ -109,12 +110,7 @@ struct sfq_sched_data
 
 static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
-	int pert = q->perturbation;
-
-	/* Have we any rotation primitives? If not, WHY? */
-	h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
-	h ^= h>>10;
-	return h & 0x3FF;
+	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
 }
 
 static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
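The new fold mixes both 32-bit keys through jhash_2words() seeded with the full 32-bit perturbation and masks the result down to the bucket count, whereas the old code only rotated by a 5-bit perturbation (set further down via net_random() & 0x1F) and always masked with 0x3FF. A rough userspace sketch of the same shape, with a stand-in mixer in place of the kernel's jhash_2words() and SFQ_HASH_DIVISOR assumed to be the 1024 buckets implied by the old 0x3FF mask:

#include <stdint.h>
#include <stdio.h>

#define SFQ_HASH_DIVISOR 1024	/* power of two, matching the old "& 0x3FF" */

/* stand-in for the kernel's jhash_2words(); any decent 32-bit mixer will do */
static uint32_t mix_2words(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = (a ^ seed) * 0x9e3779b9u + b;

	h ^= h >> 16;
	h *= 0x85ebca6bu;
	h ^= h >> 13;
	return h;
}

static unsigned fold_hash(uint32_t h, uint32_t h1, uint32_t perturbation)
{
	return mix_2words(h, h1, perturbation) & (SFQ_HASH_DIVISOR - 1);
}

int main(void)
{
	/* illustrative keys: e.g. addresses in h, ports/protocol in h1 */
	printf("bucket = %u\n", fold_hash(0xc0a80001u, 0x50001f90u, 0xdeadbeefu));
	return 0;
}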
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
 		q->hash[x] = hash;
 	}
+	/* If selected queue has length q->limit, this means that
+	 * all another queues are empty and that we do simple tail drop,
+	 * i.e. drop _this_ packet.
+	 */
+	if (q->qs[x].qlen >= q->limit)
+		return qdisc_drop(skb, sch);
+
 	sch->qstats.backlog += skb->len;
 	__skb_queue_tail(&q->qs[x], skb);
 	sfq_inc(q, x);
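The comment's claim is really a small inequality: the qdisc as a whole is kept to at most q->limit packets, and any single flow's length is at most that backlog, so a flow can only reach q->limit when every other flow is empty. A tiny illustrative check of that argument with made-up numbers (the q->limit of 127 is only an example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned limit = 127;			/* illustrative q->limit */
	unsigned qdisc_backlog = 127;		/* sch->q.qlen, kept <= limit */
	unsigned this_flow = 127;		/* q->qs[x].qlen when the check fires */

	assert(qdisc_backlog <= limit);		/* whole-qdisc bound */
	assert(this_flow <= qdisc_backlog);	/* one flow is part of the backlog */

	/* this_flow >= limit plus the two bounds pins all three values to limit,
	 * so the remaining flows together hold zero packets: a plain tail drop */
	printf("packets in all other flows: %u\n", qdisc_backlog - this_flow);
	return 0;
}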
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 	sch->qstats.backlog += skb->len;
 	__skb_queue_head(&q->qs[x], skb);
+	/* If selected queue has length q->limit+1, this means that
+	 * all another queues are empty and we do simple tail drop.
+	 * This packet is still requeued at head of queue, tail packet
+	 * is dropped.
+	 */
+	if (q->qs[x].qlen > q->limit) {
+		skb = q->qs[x].prev;
+		__skb_unlink(skb, &q->qs[x]);
+		sch->qstats.drops++;
+		sch->qstats.backlog -= skb->len;
+		kfree_skb(skb);
+		return NET_XMIT_CN;
+	}
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
 		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
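The requeue path handles the same overflow differently: the requeued packet stays at the head of its flow, so when the flow goes over the limit it is the packet at the tail that gets unlinked and freed. A toy model of that ordering with a plain array standing in for the sk_buff list (sizes and packet ids are made up):

#include <stdio.h>
#include <string.h>

#define LIMIT 4	/* toy per-flow limit standing in for q->limit */

int main(void)
{
	int flow[LIMIT + 1];			/* index 0 = head, qlen-1 = tail */
	int qlen = 0;

	for (int pkt = 1; pkt <= LIMIT; pkt++)	/* flow already full: packets 1..4 */
		flow[qlen++] = pkt;

	int requeued = 99;			/* packet handed back for requeue */
	memmove(&flow[1], &flow[0], qlen * sizeof(flow[0]));
	flow[0] = requeued;			/* __skb_queue_head() equivalent */
	qlen++;

	if (qlen > LIMIT) {			/* over the limit: drop from the tail */
		printf("drop tail packet %d, keep requeued packet %d at head\n",
		       flow[qlen - 1], flow[0]);
		qlen--;
	}
	return 0;
}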
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg)
 	struct Qdisc *sch = (struct Qdisc*)arg;
 	struct sfq_sched_data *q = qdisc_priv(sch);
 
-	q->perturbation = net_random()&0x1F;
+	get_random_bytes(&q->perturbation, 4);
 
-	if (q->perturb_period) {
-		q->perturb_timer.expires = jiffies + q->perturb_period;
-		add_timer(&q->perturb_timer);
-	}
+	if (q->perturb_period)
+		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
 }
 
 static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
@@ -391,7 +405,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
 	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
 	q->perturb_period = ctl->perturb_period*HZ;
 	if (ctl->limit)
-		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 2);
+		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
 
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > q->limit)
@@ -400,8 +414,8 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
 
 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
-		q->perturb_timer.expires = jiffies + q->perturb_period;
-		add_timer(&q->perturb_timer);
+		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+		get_random_bytes(&q->perturbation, 4);
 	}
 	sch_tree_unlock(sch);
 	return 0;
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
 		q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
 		q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
 	}
-	q->limit = SFQ_DEPTH - 2;
+	q->limit = SFQ_DEPTH - 1;
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
 		q->quantum = psched_mtu(sch->dev);
 		q->perturb_period = 0;
+		get_random_bytes(&q->perturbation, 4);
 	} else {
 		int err = sfq_change(sch, opt);
 		if (err)