author     Steve French <sfrench@us.ibm.com>  2010-06-16 09:19:36 -0400
committer  Steve French <sfrench@us.ibm.com>  2010-06-16 09:19:36 -0400
commit     0933a95dfdb1ae5c93e1ede5899f35acc2bb244d (patch)
tree       55ac47b819a2a2084f82f9d823d9152ac2a7f2b3 /net/core
parent     12420ac341533f3715b3deb788637568f22b78ff (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            25
-rw-r--r--  net/core/gen_estimator.c  15
-rw-r--r--  net/core/pktgen.c          2
3 files changed, 30 insertions, 12 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index d03470f5260a..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
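
The hunk above replaces a rate-limited pr_warning() with WARN_ONCE(), which emits the message (plus a stack trace) at most once, and conditions it on dev->num_rx_queues > 1 so single-queue devices do not warn at all. As an illustration of the once-only semantics, here is a minimal userspace stand-in; warn_once() and the sample values below are this sketch's own inventions, not kernel code:

/*
 * Userspace sketch of the WARN_ONCE() semantics relied on above:
 * the message fires at most once per call site, and only while the
 * condition is true.  The kernel macro also dumps a stack trace;
 * this stand-in only prints.  Build with: cc -o once once.c
 */
#include <stdio.h>

#define warn_once(cond, ...)					\
	do {							\
		static int warned;				\
		if ((cond) && !warned) {			\
			warned = 1;				\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
	} while (0)

int main(void)
{
	unsigned int num_rx_queues = 4, index = 7;
	int i;

	for (i = 0; i < 3; i++)		/* prints on the first pass only */
		warn_once(num_rx_queues > 1,
			  "eth0 received packet on queue %u, "
			  "but number of RX queues is %u\n",
			  index, num_rx_queues);
	return 0;
}
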
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
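
The second hunk reworks the bonding decision in __netif_receive_skb() around the new deliver_no_wcard flag, as described by the added comment. The sketch below is a condensed userspace model of that control flow only; the struct and function names are simplified stand-ins for sk_buff, net_device, and skb_bond_should_drop(), not the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct dev { const char *name; struct dev *master; };
struct pkt { bool deliver_no_wcard; struct dev *dev; };

static bool bond_should_drop(struct pkt *p, struct dev *master)
{
	(void)p; (void)master;
	return true;		/* e.g. frame arrived on an inactive slave */
}

/* Returns the device that handlers must match exactly, or NULL
 * when normal (wildcard) delivery is allowed. */
static struct dev *classify(struct pkt *p)
{
	struct dev *orig_dev = p->dev;
	struct dev *master = orig_dev->master;

	if (p->deliver_no_wcard)	/* vlan accel path flagged it */
		return orig_dev;
	if (master) {
		if (bond_should_drop(p, master)) {
			p->deliver_no_wcard = true;
			return orig_dev; /* deliver only exact match */
		}
		p->dev = master;	/* credit the frame to the bond */
	}
	return NULL;
}

int main(void)
{
	struct dev bond0 = { "bond0", NULL };
	struct dev eth1 = { "eth1", &bond0 };
	struct pkt p = { false, &eth1 };

	printf("exact-match-only: %s\n", classify(&p) ? "yes" : "no");
	return 0;
}
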
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
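
Taken together, the gen_estimator.c hunks make one change: the est_root rbtree, previously protected only by callers holding rtnl_mutex (hence the dropped "NOTE: Called under rtnl_mutex" lines), now has a dedicated est_tree_lock taken around every insertion, erase, and lookup. A rough userspace analogue of that guard-every-touch pattern, with the rbtree reduced to a counter and a pthread spinlock standing in for the kernel's (illustrative only; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tree_lock;
static int tree_size;			/* stand-in for the est_root rbtree */

static void *add_estimator(void *arg)
{
	(void)arg;
	pthread_spin_lock(&tree_lock);	/* mirrors spin_lock(&est_tree_lock) */
	tree_size++;			/* rb_insert would go here */
	pthread_spin_unlock(&tree_lock);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, add_estimator, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("estimators: %d\n", tree_size);	/* always 4 */
	return 0;
}
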
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
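
The pktgen one-liner bases next_tx on spin_until, the deadline that was asked for, rather than end_time, the moment spin() actually returned. Since spin() returns at or after its deadline, the old formula let the lateness compound packet after packet, while the new one keeps a fixed transmit cadence. A small back-of-the-envelope sketch of that drift, in plain nanosecond arithmetic (the 10 us overshoot per wakeup is an invented example figure):

#include <stdio.h>

int main(void)
{
	const long long delay = 1000000;	/* 1 ms between packets */
	const long long late  = 10000;		/* spin() wakes 10 us late */
	long long old_next = 0, new_next = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		/* old: schedule from the actual (late) wakeup time */
		old_next = (old_next + late) + delay;
		/* new: schedule from the requested deadline itself */
		new_next = new_next + delay;
	}
	/* old scheme drifts by 'late' per packet: 10 ms after 1000 */
	printf("drift after %d packets: %lld ns\n", i, old_next - new_next);
	return 0;
}
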