author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 14:54:39 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 14:54:39 -0400
commit		804b908adfcffe3831621acb6c8a776585983d2a
tree		9664d4395ae8e590e50c568225ea88435383ed99 /net
parent		1b3e4c706c19dec10b11dac1b23071e3e4b262ad
parent		ce0e32e65f70337e0732c97499b643205fa8ea31
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[NET]: Fix possible dev_deactivate race condition
[INET]: Justification for local port range robustness.
[PACKET]: Kill unused pg_vec_endpage() function
[NET]: QoS/Sched as menuconfig
[NET]: Fix bug in sk_filter race cures.
[PATCH] mac80211: make ieee802_11_parse_elems return void
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c		 3
-rw-r--r--	net/ipv4/inet_connection_sock.c	 2
-rw-r--r--	net/ipv4/inet_hashtables.c	 2
-rw-r--r--	net/ipv4/sysctl_net_ipv4.c	 4
-rw-r--r--	net/ipv4/udp.c			 5
-rw-r--r--	net/ipv6/inet6_hashtables.c	 2
-rw-r--r--	net/mac80211/ieee80211_sta.c	55
-rw-r--r--	net/packet/af_packet.c		 5
-rw-r--r--	net/sched/Kconfig		10
-rw-r--r--	net/sched/sch_generic.c		26
10 files changed, 45 insertions, 69 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 1f0068eae50..e0a06942c02 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -447,7 +447,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	rcu_assign_pointer(sk->sk_filter, fp);
 	rcu_read_unlock_bh();
 
-	sk_filter_delayed_uncharge(sk, old_fp);
+	if (old_fp)
+		sk_filter_delayed_uncharge(sk, old_fp);
 	return 0;
 }
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 3cef12835c4..8fb6ca23700 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -93,7 +93,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
 		int remaining, rover, low, high;
 
 		inet_get_local_port_range(&low, &high);
-		remaining = high - low;
+		remaining = (high - low) + 1;
 		rover = net_random() % remaining + low;
 
 		do {
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index fac6398e436..16eecc7046a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -286,7 +286,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct inet_timewait_sock *tw = NULL;
 
 		inet_get_local_port_range(&low, &high);
-		remaining = high - low;
+		remaining = (high - low) + 1;
 
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c78acc1a7f1..ffddd2b4535 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -122,7 +122,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
 	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
 
 	if (write && ret == 0) {
-		if (range[1] <= range[0])
+		if (range[1] < range[0])
 			ret = -EINVAL;
 		else
 			set_local_port_range(range);
@@ -150,7 +150,7 @@ static int ipv4_sysctl_local_port_range(ctl_table *table, int __user *name,
 
 	ret = sysctl_intvec(&tmp, name, nlen, oldval, oldlenp, newval, newlen);
 	if (ret == 0 && newval && newlen) {
-		if (range[1] <= range[0])
+		if (range[1] < range[0])
 			ret = -EINVAL;
 		else
 			set_local_port_range(range);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cb9fc58efb2..35d2b0e9e10 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -147,13 +147,14 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 	write_lock_bh(&udp_hash_lock);
 
 	if (!snum) {
-		int i, low, high;
+		int i, low, high, remaining;
 		unsigned rover, best, best_size_so_far;
 
 		inet_get_local_port_range(&low, &high);
+		remaining = (high - low) + 1;
 
 		best_size_so_far = UINT_MAX;
-		best = rover = net_random() % (high - low) + low;
+		best = rover = net_random() % remaining + low;
 
 		/* 1st pass: look for empty (or shortest) hash chain */
 		for (i = 0; i < UDP_HTABLE_SIZE; i++) {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 1c2c2765543..d6f1026f194 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -261,7 +261,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
 		struct inet_timewait_sock *tw = NULL;
 
 		inet_get_local_port_range(&low, &high);
-		remaining = high - low;
+		remaining = (high - low) + 1;
 
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {
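The port-range hunks above (inet_connection_sock.c, inet_hashtables.c, sysctl_net_ipv4.c, udp.c, inet6_hashtables.c) all switch to treating the configured local port range as inclusive of both endpoints, and the sysctl check now accepts range[0] == range[1]. A rough user-space sketch of that arithmetic — pick_port() is an illustrative name and rand() stands in for the kernel's net_random():

/* Sketch of the inclusive-range arithmetic adopted above (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

static int pick_port(int low, int high)
{
	int remaining = (high - low) + 1;	/* both endpoints are selectable */

	return rand() % remaining + low;	/* remaining >= 1 even when low == high */
}

int main(void)
{
	/* With the old "high - low", a single-port range (low == high) meant
	 * remaining == 0 and a modulo by zero; now it simply yields that port. */
	printf("%d\n", pick_port(61000, 61000));
	return 0;
}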
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index db81aef6177..f7ffeec3913 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -108,14 +108,11 @@ struct ieee802_11_elems {
 	u8 wmm_param_len;
 };
 
-enum ParseRes { ParseOK = 0, ParseUnknown = 1, ParseFailed = -1 };
-
-static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
-					    struct ieee802_11_elems *elems)
+static void ieee802_11_parse_elems(u8 *start, size_t len,
+				   struct ieee802_11_elems *elems)
 {
 	size_t left = len;
 	u8 *pos = start;
-	int unknown = 0;
 
 	memset(elems, 0, sizeof(*elems));
 
@@ -126,15 +123,8 @@ static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
 		elen = *pos++;
 		left -= 2;
 
-		if (elen > left) {
-#if 0
-			if (net_ratelimit())
-				printk(KERN_DEBUG "IEEE 802.11 element parse "
-				       "failed (id=%d elen=%d left=%d)\n",
-				       id, elen, left);
-#endif
-			return ParseFailed;
-		}
+		if (elen > left)
+			return;
 
 		switch (id) {
 		case WLAN_EID_SSID:
@@ -201,28 +191,15 @@ static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len,
 			elems->ext_supp_rates_len = elen;
 			break;
 		default:
-#if 0
-			printk(KERN_DEBUG "IEEE 802.11 element parse ignored "
-			       "unknown element (id=%d elen=%d)\n",
-			       id, elen);
-#endif
-			unknown++;
 			break;
 		}
 
 		left -= elen;
 		pos += elen;
 	}
-
-	/* Do not trigger error if left == 1 as Apple Airport base stations
-	 * send AssocResps that are one spurious byte too long. */
-
-	return unknown ? ParseUnknown : ParseOK;
 }
 
 
-
-
 static int ecw2cw(int ecw)
 {
 	int cw = 1;
@@ -931,12 +908,7 @@ static void ieee80211_auth_challenge(struct net_device *dev,
 
 	printk(KERN_DEBUG "%s: replying to auth challenge\n", dev->name);
 	pos = mgmt->u.auth.variable;
-	if (ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems)
-	    == ParseFailed) {
-		printk(KERN_DEBUG "%s: failed to parse Auth(challenge)\n",
-		       dev->name);
-		return;
-	}
+	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 	if (!elems.challenge) {
 		printk(KERN_DEBUG "%s: no challenge IE in shared key auth "
 		       "frame\n", dev->name);
@@ -1230,12 +1202,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
 	aid &= ~(BIT(15) | BIT(14));
 
 	pos = mgmt->u.assoc_resp.variable;
-	if (ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems)
-	    == ParseFailed) {
-		printk(KERN_DEBUG "%s: failed to parse AssocResp\n",
-		       dev->name);
-		return;
-	}
+	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 
 	if (!elems.supp_rates) {
 		printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
@@ -1459,7 +1426,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee802_11_elems elems;
 	size_t baselen;
-	int channel, invalid = 0, clen;
+	int channel, clen;
 	struct ieee80211_sta_bss *bss;
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1505,9 +1472,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 	}
 
-	if (ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen,
-				   &elems) == ParseFailed)
-		invalid = 1;
+	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
 
 	if (sdata->type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates &&
 	    memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
@@ -1724,9 +1689,7 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
 	if (baselen > len)
 		return;
 
-	if (ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen,
-				   &elems) == ParseFailed)
-		return;
+	ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
 
 	if (elems.erp_info && elems.erp_info_len >= 1)
 		ieee80211_handle_erp_ie(dev, elems.erp_info[0]);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e11000a8e95..d0936506b73 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1623,11 +1623,6 @@ static struct vm_operations_struct packet_mmap_ops = {
 	.close =packet_mm_close,
 };
 
-static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
-{
-	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
-}
-
 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 {
 	int i;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 92435a882fa..9c15c4888d1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -2,9 +2,7 @@
 # Traffic control configuration.
 #
 
-menu "QoS and/or fair queueing"
-
-config NET_SCHED
+menuconfig NET_SCHED
 	bool "QoS and/or fair queueing"
 	select NET_SCH_FIFO
 	---help---
@@ -41,9 +39,6 @@ config NET_SCHED
 	  The available schedulers are listed in the following questions; you
 	  can say Y to as many as you like. If unsure, say N now.
 
-config NET_SCH_FIFO
-	bool
-
 if NET_SCHED
 
 comment "Queueing/Scheduling"
@@ -500,4 +495,5 @@ config NET_CLS_IND
 
 endif # NET_SCHED
 
-endmenu
+config NET_SCH_FIFO
+	bool
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e01d57692c9..fa1a6f45dc4 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -556,6 +556,7 @@ void dev_deactivate(struct net_device *dev)
 {
 	struct Qdisc *qdisc;
 	struct sk_buff *skb;
+	int running;
 
 	spin_lock_bh(&dev->queue_lock);
 	qdisc = dev->qdisc;
@@ -571,12 +572,31 @@ void dev_deactivate(struct net_device *dev)
 
 	dev_watchdog_down(dev);
 
-	/* Wait for outstanding dev_queue_xmit calls. */
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
-		yield();
+	do {
+		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+			yield();
+
+		/*
+		 * Double-check inside queue lock to ensure that all effects
+		 * of the queue run are visible when we return.
+		 */
+		spin_lock_bh(&dev->queue_lock);
+		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+		spin_unlock_bh(&dev->queue_lock);
+
+		/*
+		 * The running flag should never be set at this point because
+		 * we've already set dev->qdisc to noop_qdisc *inside* the same
+		 * pair of spin locks. That is, if any qdisc_run starts after
+		 * our initial test it should see the noop_qdisc and then
+		 * clear the RUNNING bit before dropping the queue lock. So
+		 * if it is set here then we've found a bug.
+		 */
+	} while (WARN_ON_ONCE(running));
 }
 
 void dev_init_scheduler(struct net_device *dev)
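The dev_deactivate() change above is a wait-then-recheck handshake: spin until the QDISC_RUNNING bit clears, then re-read it under queue_lock so that everything the last qdisc_run() published under that lock is visible before returning. A rough user-space analogue (pthreads; deactivate_wait() and qdisc_running are illustrative names, not kernel API, and real code would use proper atomics):

/* User-space sketch of the wait-then-recheck pattern (illustrative only). */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool qdisc_running;	/* stands in for __LINK_STATE_QDISC_RUNNING */

void deactivate_wait(void)
{
	bool running;

	do {
		/* Lock-free wait, like the yield() loop in the patch. */
		while (qdisc_running)
			sched_yield();

		/* Re-check under the lock: the runner clears the flag while
		 * holding the same lock, so once we read it clear here, all
		 * of the runner's updates are visible to this thread too. */
		pthread_mutex_lock(&queue_lock);
		running = qdisc_running;
		pthread_mutex_unlock(&queue_lock);
	} while (running);
}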