diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-11 17:20:03 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-11 17:20:03 -0400 |
| commit | 4cea8706c39023f5f721c88dd0ae17a097a39c98 (patch) | |
| tree | 7eb589399dc10944cde2802d68adb5511405446f /net | |
| parent | 7ae1277a5202109a31d8f81ac99d4a53278dab84 (diff) | |
| parent | e79aa8671033535c2e9ffc0a68010ae49ed5734c (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
wimax/i2400m: fix missing endian correction read in fw loader
net8139: fix a race at the end of NAPI
pktgen: Fix accuracy of inter-packet delay.
pkt_sched: gen_estimator: add a new lock
net: deliver skbs on inactive slaves to exact matches
ipv6: fix ICMP6_MIB_OUTERRORS
r8169: fix mdio_read and update mdio_write according to hw specs
gianfar: Revive the driver for eTSEC devices (disable timestamping)
caif: fix a couple range checks
phylib: Add support for the LXT973 phy.
net: Print num_rx_queues imbalance warning only when there are allocated queues
Diffstat (limited to 'net')
| -rw-r--r-- | net/8021q/vlan_core.c | 4 |
| -rw-r--r-- | net/caif/cfrfml.c | 2 |
| -rw-r--r-- | net/caif/cfveil.c | 2 |
| -rw-r--r-- | net/core/dev.c | 25 |
| -rw-r--r-- | net/core/gen_estimator.c | 15 |
| -rw-r--r-- | net/core/pktgen.c | 2 |
| -rw-r--r-- | net/ipv6/icmp.c | 4 |
7 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index bd537fc10254..50f58f5f1c34 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
| @@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
| 12 | return NET_RX_DROP; | 12 | return NET_RX_DROP; |
| 13 | 13 | ||
| 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
| 15 | goto drop; | 15 | skb->deliver_no_wcard = 1; |
| 16 | 16 | ||
| 17 | skb->skb_iif = skb->dev->ifindex; | 17 | skb->skb_iif = skb->dev->ifindex; |
| 18 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 18 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
| @@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
| 84 | struct sk_buff *p; | 84 | struct sk_buff *p; |
| 85 | 85 | ||
| 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
| 87 | goto drop; | 87 | skb->deliver_no_wcard = 1; |
| 88 | 88 | ||
| 89 | skb->skb_iif = skb->dev->ifindex; | 89 | skb->skb_iif = skb->dev->ifindex; |
| 90 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 90 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index cd2830fec935..fd27b172fb5d 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c | |||
| @@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
| 83 | if (!cfsrvl_ready(service, &ret)) | 83 | if (!cfsrvl_ready(service, &ret)) |
| 84 | return ret; | 84 | return ret; |
| 85 | 85 | ||
| 86 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 86 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
| 87 | pr_err("CAIF: %s():Packet too large - size=%d\n", | 87 | pr_err("CAIF: %s():Packet too large - size=%d\n", |
| 88 | __func__, cfpkt_getlen(pkt)); | 88 | __func__, cfpkt_getlen(pkt)); |
| 89 | return -EOVERFLOW; | 89 | return -EOVERFLOW; |
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c index 0fd827f49491..e04f7d964e83 100644 --- a/net/caif/cfveil.c +++ b/net/caif/cfveil.c | |||
| @@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
| 84 | return ret; | 84 | return ret; |
| 85 | caif_assert(layr->dn != NULL); | 85 | caif_assert(layr->dn != NULL); |
| 86 | caif_assert(layr->dn->transmit != NULL); | 86 | caif_assert(layr->dn->transmit != NULL); |
| 87 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 87 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
| 88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | 88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", |
| 89 | __func__, cfpkt_getlen(pkt)); | 89 | __func__, cfpkt_getlen(pkt)); |
| 90 | return -EOVERFLOW; | 90 | return -EOVERFLOW; |
diff --git a/net/core/dev.c b/net/core/dev.c index d03470f5260a..2b3bf53bc687 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
| 2253 | if (skb_rx_queue_recorded(skb)) { | 2253 | if (skb_rx_queue_recorded(skb)) { |
| 2254 | u16 index = skb_get_rx_queue(skb); | 2254 | u16 index = skb_get_rx_queue(skb); |
| 2255 | if (unlikely(index >= dev->num_rx_queues)) { | 2255 | if (unlikely(index >= dev->num_rx_queues)) { |
| 2256 | if (net_ratelimit()) { | 2256 | WARN_ONCE(dev->num_rx_queues > 1, "%s received packet " |
| 2257 | pr_warning("%s received packet on queue " | 2257 | "on queue %u, but number of RX queues is %u\n", |
| 2258 | "%u, but number of RX queues is %u\n", | 2258 | dev->name, index, dev->num_rx_queues); |
| 2259 | dev->name, index, dev->num_rx_queues); | ||
| 2260 | } | ||
| 2261 | goto done; | 2259 | goto done; |
| 2262 | } | 2260 | } |
| 2263 | rxqueue = dev->_rx + index; | 2261 | rxqueue = dev->_rx + index; |
| @@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
| 2812 | if (!skb->skb_iif) | 2810 | if (!skb->skb_iif) |
| 2813 | skb->skb_iif = skb->dev->ifindex; | 2811 | skb->skb_iif = skb->dev->ifindex; |
| 2814 | 2812 | ||
| 2813 | /* | ||
| 2814 | * bonding note: skbs received on inactive slaves should only | ||
| 2815 | * be delivered to pkt handlers that are exact matches. Also | ||
| 2816 | * the deliver_no_wcard flag will be set. If packet handlers | ||
| 2817 | * are sensitive to duplicate packets these skbs will need to | ||
| 2818 | * be dropped at the handler. The vlan accel path may have | ||
| 2819 | * already set the deliver_no_wcard flag. | ||
| 2820 | */ | ||
| 2815 | null_or_orig = NULL; | 2821 | null_or_orig = NULL; |
| 2816 | orig_dev = skb->dev; | 2822 | orig_dev = skb->dev; |
| 2817 | master = ACCESS_ONCE(orig_dev->master); | 2823 | master = ACCESS_ONCE(orig_dev->master); |
| 2818 | if (master) { | 2824 | if (skb->deliver_no_wcard) |
| 2819 | if (skb_bond_should_drop(skb, master)) | 2825 | null_or_orig = orig_dev; |
| 2826 | else if (master) { | ||
| 2827 | if (skb_bond_should_drop(skb, master)) { | ||
| 2828 | skb->deliver_no_wcard = 1; | ||
| 2820 | null_or_orig = orig_dev; /* deliver only exact match */ | 2829 | null_or_orig = orig_dev; /* deliver only exact match */ |
| 2821 | else | 2830 | } else |
| 2822 | skb->dev = master; | 2831 | skb->dev = master; |
| 2823 | } | 2832 | } |
| 2824 | 2833 | ||
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index cf8e70392fe0..785e5276a300 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
| @@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock); | |||
| 107 | 107 | ||
| 108 | /* Protects against soft lockup during large deletion */ | 108 | /* Protects against soft lockup during large deletion */ |
| 109 | static struct rb_root est_root = RB_ROOT; | 109 | static struct rb_root est_root = RB_ROOT; |
| 110 | static DEFINE_SPINLOCK(est_tree_lock); | ||
| 110 | 111 | ||
| 111 | static void est_timer(unsigned long arg) | 112 | static void est_timer(unsigned long arg) |
| 112 | { | 113 | { |
| @@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats | |||
| 201 | * | 202 | * |
| 202 | * Returns 0 on success or a negative error code. | 203 | * Returns 0 on success or a negative error code. |
| 203 | * | 204 | * |
| 204 | * NOTE: Called under rtnl_mutex | ||
| 205 | */ | 205 | */ |
| 206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | 206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, |
| 207 | struct gnet_stats_rate_est *rate_est, | 207 | struct gnet_stats_rate_est *rate_est, |
| @@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 232 | est->last_packets = bstats->packets; | 232 | est->last_packets = bstats->packets; |
| 233 | est->avpps = rate_est->pps<<10; | 233 | est->avpps = rate_est->pps<<10; |
| 234 | 234 | ||
| 235 | spin_lock(&est_tree_lock); | ||
| 235 | if (!elist[idx].timer.function) { | 236 | if (!elist[idx].timer.function) { |
| 236 | INIT_LIST_HEAD(&elist[idx].list); | 237 | INIT_LIST_HEAD(&elist[idx].list); |
| 237 | setup_timer(&elist[idx].timer, est_timer, idx); | 238 | setup_timer(&elist[idx].timer, est_timer, idx); |
| @@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 242 | 243 | ||
| 243 | list_add_rcu(&est->list, &elist[idx].list); | 244 | list_add_rcu(&est->list, &elist[idx].list); |
| 244 | gen_add_node(est); | 245 | gen_add_node(est); |
| 246 | spin_unlock(&est_tree_lock); | ||
| 245 | 247 | ||
| 246 | return 0; | 248 | return 0; |
| 247 | } | 249 | } |
| @@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head) | |||
| 261 | * | 263 | * |
| 262 | * Removes the rate estimator specified by &bstats and &rate_est. | 264 | * Removes the rate estimator specified by &bstats and &rate_est. |
| 263 | * | 265 | * |
| 264 | * NOTE: Called under rtnl_mutex | ||
| 265 | */ | 266 | */ |
| 266 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | 267 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, |
| 267 | struct gnet_stats_rate_est *rate_est) | 268 | struct gnet_stats_rate_est *rate_est) |
| 268 | { | 269 | { |
| 269 | struct gen_estimator *e; | 270 | struct gen_estimator *e; |
| 270 | 271 | ||
| 272 | spin_lock(&est_tree_lock); | ||
| 271 | while ((e = gen_find_node(bstats, rate_est))) { | 273 | while ((e = gen_find_node(bstats, rate_est))) { |
| 272 | rb_erase(&e->node, &est_root); | 274 | rb_erase(&e->node, &est_root); |
| 273 | 275 | ||
| @@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
| 278 | list_del_rcu(&e->list); | 280 | list_del_rcu(&e->list); |
| 279 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 281 | call_rcu(&e->e_rcu, __gen_kill_estimator); |
| 280 | } | 282 | } |
| 283 | spin_unlock(&est_tree_lock); | ||
| 281 | } | 284 | } |
| 282 | EXPORT_SYMBOL(gen_kill_estimator); | 285 | EXPORT_SYMBOL(gen_kill_estimator); |
| 283 | 286 | ||
| @@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator); | |||
| 312 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, | 315 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, |
| 313 | const struct gnet_stats_rate_est *rate_est) | 316 | const struct gnet_stats_rate_est *rate_est) |
| 314 | { | 317 | { |
| 318 | bool res; | ||
| 319 | |||
| 315 | ASSERT_RTNL(); | 320 | ASSERT_RTNL(); |
| 316 | 321 | ||
| 317 | return gen_find_node(bstats, rate_est) != NULL; | 322 | spin_lock(&est_tree_lock); |
| 323 | res = gen_find_node(bstats, rate_est) != NULL; | ||
| 324 | spin_unlock(&est_tree_lock); | ||
| 325 | |||
| 326 | return res; | ||
| 318 | } | 327 | } |
| 319 | EXPORT_SYMBOL(gen_estimator_active); | 328 | EXPORT_SYMBOL(gen_estimator_active); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2ad68da418df..1dacd7ba8dbb 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
| 2170 | end_time = ktime_now(); | 2170 | end_time = ktime_now(); |
| 2171 | 2171 | ||
| 2172 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2172 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
| 2173 | pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay); | 2173 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
| 2174 | } | 2174 | } |
| 2175 | 2175 | ||
| 2176 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2176 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ce7992982557..03e62f94ff8e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
| @@ -483,7 +483,7 @@ route_done: | |||
| 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
| 484 | MSG_DONTWAIT, np->dontfrag); | 484 | MSG_DONTWAIT, np->dontfrag); |
| 485 | if (err) { | 485 | if (err) { |
| 486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
| 487 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
| 488 | goto out_put; | 488 | goto out_put; |
| 489 | } | 489 | } |
| @@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
| 565 | np->dontfrag); | 565 | np->dontfrag); |
| 566 | 566 | ||
| 567 | if (err) { | 567 | if (err) { |
| 568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
| 569 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
| 570 | goto out_put; | 570 | goto out_put; |
| 571 | } | 571 | } |
