author     David S. Miller <davem@davemloft.net>   2010-06-11 16:32:31 -0400
committer  David S. Miller <davem@davemloft.net>   2010-06-11 16:32:31 -0400
commit     62522d36d74a843e78d17f2dffc90468c6762803 (patch)
tree       d9d21de6724425c1b0ba12991e0865556aeda6b4 /net/core
parent     a71fba97295db924c0b90266e9833e5059fead24 (diff)
parent     e79aa8671033535c2e9ffc0a68010ae49ed5734c (diff)

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'net/core')
 -rw-r--r--  net/core/dev.c            25  +++++++++++++++++--------
 -rw-r--r--  net/core/gen_estimator.c  15  ++++++++++++---
 -rw-r--r--  net/core/pktgen.c          2  +-
 3 files changed, 30 insertions(+), 12 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6f330cee79a6..277844901ce3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2273,11 +2273,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				  "on queue %u, but number of RX queues is %u\n",
+				  dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
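
The ratelimited pr_warning() becomes a WARN_ONCE() that is additionally gated on dev->num_rx_queues > 1, so single-queue devices never warn about a stale queue mapping at all. A minimal sketch of the semantics relied on here (hypothetical helper, not part of the patch):

	/* WARN_ONCE(cond, fmt, ...) evaluates to cond; it prints the message
	 * and a backtrace the first time cond is true and is silent on every
	 * later hit. */
	static bool stale_rx_queue(struct net_device *dev, u16 index)
	{
		if (index < dev->num_rx_queues)
			return false;
		WARN_ONCE(dev->num_rx_queues > 1,
			  "%s: rx queue %u out of range (%u queues)\n",
			  dev->name, index, dev->num_rx_queues);
		return true;	/* caller skips RPS, as the goto done above */
	}
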
@@ -2815,13 +2813,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches.  Also
+	 * the deliver_no_wcard flag will be set.  If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler.  The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
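
The new deliver_no_wcard bit records the "exact match only" decision on the skb itself, so a path that has already made it (e.g. vlan accel) is honored here. A condensed sketch of the delivery test this feeds, simplified from the ptype loops later in __netif_receive_skb (not part of this hunk):

	static bool deliver_to(const struct net_device *ptype_dev,
			       const struct net_device *skb_dev,
			       const struct net_device *orig_dev,
			       const struct net_device *null_or_orig)
	{
		/* Normally null_or_orig is NULL, so wildcard handlers
		 * (ptype_dev == NULL) match the first term.  Setting
		 * null_or_orig = orig_dev poisons that slot: wildcard
		 * handlers stop matching and only handlers bound to the
		 * exact device still receive the skb. */
		return ptype_dev == null_or_orig || ptype_dev == skb_dev ||
		       ptype_dev == orig_dev;
	}
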
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
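
Together with the DEFINE_SPINLOCK() above, these hunks put every writer of est_root under est_tree_lock, which is what allows the two "NOTE: Called under rtnl_mutex" comments to go away. The pattern in isolation (hypothetical demo_* names, real rbtree/spinlock primitives):

	struct demo_node {
		struct rb_node node;
		unsigned long key;
	};

	static struct rb_root demo_root = RB_ROOT;
	static DEFINE_SPINLOCK(demo_lock);	/* protects demo_root */

	static void demo_insert(struct demo_node *item)
	{
		struct rb_node **link, *parent = NULL;

		spin_lock(&demo_lock);		/* lookup and link as one unit */
		link = &demo_root.rb_node;
		while (*link) {
			struct demo_node *n = rb_entry(*link, struct demo_node, node);

			parent = *link;
			link = item->key < n->key ? &parent->rb_left
						  : &parent->rb_right;
		}
		rb_link_node(&item->node, parent, link);
		rb_insert_color(&item->node, &demo_root);
		spin_unlock(&demo_lock);
	}
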
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
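
Note the ordering gen_kill_estimator() now keeps: entries are unlinked from the rb-tree and the per-interval list under est_tree_lock, but the memory is only freed after an RCU grace period, so est_timer()'s lockless list walk never touches a freed estimator. Condensed from the hunks above (the est_lock/bstats handling elided between them is unchanged):

	spin_lock(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);	/* new lookups miss it */
		list_del_rcu(&e->list);		/* unlink from timer list */
		call_rcu(&e->e_rcu, __gen_kill_estimator); /* free after GP */
	}
	spin_unlock(&est_tree_lock);

gen_estimator_active() keeps its ASSERT_RTNL() and takes the lock as well, so the read side is safe whether or not the caller holds rtnl.
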
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
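
The pktgen one-liner fixes rate drift: anchoring next_tx to spin_until (the intended wakeup) instead of end_time (the actual one) stops scheduling latency from accumulating into the inter-packet gap. A userspace sketch of the difference (hypothetical, plain C):

	#include <stdint.h>

	/* delay is the intended inter-packet gap in ns */
	static uint64_t next_deadline(uint64_t spin_until, uint64_t end_time,
				      uint64_t delay)
	{
		/* old: end_time + delay -- every oversleep
		 * (end_time - spin_until) is inherited by all later
		 * deadlines, so the achieved rate drifts low. */
		(void)end_time;
		/* new: anchor to the intended wakeup; oversleeps are
		 * absorbed rather than compounded. */
		return spin_until + delay;
	}
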