author     David S. Miller <davem@davemloft.net>    2010-09-10 01:27:33 -0400
committer  David S. Miller <davem@davemloft.net>    2010-09-10 01:27:33 -0400
commit     e548833df83c3554229eff0672900bfe958b45fd (patch)
tree       85efc4a76dc356593d6d394776aeb845dc580fb6 /net/core
parent     cbd9da7be869f676afc204e1a664163778c770bd (diff)
parent     053d8f6622701f849fda2ca2c9ae596c13599ba9 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/mac80211/main.c
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c             16
-rw-r--r--   net/core/gen_estimator.c   12
-rw-r--r--   net/core/skbuff.c           6
3 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index cdbbea39c549..fc2dc933bee5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2056,16 +2056,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
 	int queue_index;
-	struct sock *sk = skb->sk;
+	const struct net_device_ops *ops = dev->netdev_ops;
 
-	queue_index = sk_tx_queue_get(sk);
-	if (queue_index < 0) {
-		const struct net_device_ops *ops = dev->netdev_ops;
+	if (ops->ndo_select_queue) {
+		queue_index = ops->ndo_select_queue(dev, skb);
+		queue_index = dev_cap_txqueue(dev, queue_index);
+	} else {
+		struct sock *sk = skb->sk;
+		queue_index = sk_tx_queue_get(sk);
+		if (queue_index < 0) {
 
-		if (ops->ndo_select_queue) {
-			queue_index = ops->ndo_select_queue(dev, skb);
-			queue_index = dev_cap_txqueue(dev, queue_index);
-		} else {
 			queue_index = 0;
 			if (dev->real_num_tx_queues > 1)
 				queue_index = skb_tx_hash(dev, skb);
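Note: the effect of this hunk is that a driver-supplied ndo_select_queue() callback is now consulted before the cached per-socket queue index, rather than only when no cached index exists. A minimal sketch of the resulting selection order, pieced together from the '+' lines above (the sk_tx_queue_set() caching that follows in the full function lies outside this hunk):

	/* sketch of the new decision order in dev_pick_tx(), not the full function */
	if (ops->ndo_select_queue) {
		/* the driver decides; dev_cap_txqueue() clamps the result */
		queue_index = dev_cap_txqueue(dev, ops->ndo_select_queue(dev, skb));
	} else {
		queue_index = sk_tx_queue_get(skb->sk);	/* cached index, -1 if none */
		if (queue_index < 0)
			queue_index = dev->real_num_tx_queues > 1 ?
				      skb_tx_hash(dev, skb) : 0;
	}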
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7429b0..6743146e4d6b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 
 	return 0;
 }
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 {
 	struct gen_estimator *e;
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 
 	ASSERT_RTNL();
 
-	spin_lock(&est_tree_lock);
+	spin_lock_bh(&est_tree_lock);
 	res = gen_find_node(bstats, rate_est) != NULL;
-	spin_unlock(&est_tree_lock);
+	spin_unlock_bh(&est_tree_lock);
 
 	return res;
 }
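Note: all five hunks in this file make the same substitution, turning plain spin_lock()/spin_unlock() on est_tree_lock into the _bh variants. The merge itself carries no changelog for the underlying fix, but the usual reason for such a conversion is that the lock is also taken from softirq context, so process-context holders must disable bottom halves to avoid deadlocking against themselves on the same CPU. A hypothetical, minimal illustration of that pattern (example_lock, example_timer_fn and example_update are invented names, not code from this patch):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock shared with a timer */

/* Timer callback: runs in softirq (BH) context and takes the lock. */
static void example_timer_fn(unsigned long data)
{
	spin_lock(&example_lock);
	/* ... read or update the shared structure ... */
	spin_unlock(&example_lock);
}

/* Process-context path: must use the _bh variant.  With a plain
 * spin_lock(), the timer could fire on this CPU while the lock is
 * held and spin on it forever. */
static void example_update(void)
{
	spin_lock_bh(&example_lock);
	/* ... modify the shared structure ... */
	spin_unlock_bh(&example_lock);
}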
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2d1bc761fe4b..752c1972b3a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2569,6 +2569,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 	__copy_skb_header(nskb, skb);
 	nskb->mac_len = skb->mac_len;
 
+	/* nskb and skb might have different headroom */
+	if (nskb->ip_summed == CHECKSUM_PARTIAL)
+		nskb->csum_start += skb_headroom(nskb) - headroom;
+
 	skb_reset_mac_header(nskb);
 	skb_set_network_header(nskb, skb->mac_len);
 	nskb->transport_header = (nskb->network_header +
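Note: csum_start is stored as an offset from skb->head, so the value copied over by __copy_skb_header() is only correct if nskb ends up with the same headroom as the original skb (saved in 'headroom' earlier in skb_segment()). For CHECKSUM_PARTIAL packets the added lines shift it by the difference; informally:

	new csum_start = old csum_start + (skb_headroom(nskb) - headroom)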
@@ -2699,7 +2703,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		return -E2BIG;
 
 	headroom = skb_headroom(p);
-	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
 	if (unlikely(!nskb))
 		return -ENOMEM;
 
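Note: netdev_alloc_skb() reserves an extra NET_SKB_PAD bytes of headroom beyond the requested length, so the old call left nskb with more headroom than the 'headroom' value computed from p just above; plain alloc_skb() (with GFP_ATOMIC, since skb_gro_receive() runs in softirq context) allocates exactly what was asked for, and the skb_reserve(nskb, headroom) that follows later in this function then makes nskb's headroom match p's. Presumably this pairs with the skb_segment() hunk above, both guarding against skbs whose headrooms differ. A rough before/after sketch (not the full allocation path):

	/* before: effective headroom becomes NET_SKB_PAD + headroom */
	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));

	/* after: exactly 'headroom' bytes once skb_reserve() runs */
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (nskb)
		skb_reserve(nskb, headroom);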