Diffstat (limited to 'net/core')
-rw-r--r--   net/core/datagram.c      |  87
-rw-r--r--   net/core/dev.c           |  54
-rw-r--r--   net/core/gen_estimator.c |   9
-rw-r--r--   net/core/skbuff.c        |  12
4 files changed, 128 insertions, 34 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad6019..52f577a0f544 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
 	return -EFAULT;
 }
 
+/**
+ *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying to
+ *	@from: io vector to copy from
+ *	@len: amount of data to copy to buffer from iovec
+ *
+ *	Returns 0 or -EFAULT.
+ *	Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+				 struct iovec *from, int len)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+
+	/* Copy header. */
+	if (copy > 0) {
+		if (copy > len)
+			copy = len;
+		if (memcpy_fromiovec(skb->data + offset, from, copy))
+			goto fault;
+		if ((len -= copy) == 0)
+			return 0;
+		offset += copy;
+	}
+
+	/* Copy paged appendix. Hmm... why does this look so complicated? */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end - offset) > 0) {
+			int err;
+			u8 *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap(page);
+			err = memcpy_fromiovec(vaddr + frag->page_offset +
+					       offset - start, from, copy);
+			kunmap(page);
+			if (err)
+				goto fault;
+
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+		for (; list; list = list->next) {
+			int end;
+
+			WARN_ON(start > offset + len);
+
+			end = start + list->len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
+				if (skb_copy_datagram_from_iovec(list,
+								 offset - start,
+								 from, copy))
+					goto fault;
+				if ((len -= copy) == 0)
+					return 0;
+				offset += copy;
+			}
+			start = end;
+		}
+	}
+	if (!len)
+		return 0;
+
+fault:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
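
This is the write-side counterpart of skb_copy_datagram_iovec(): it walks the linear header, the page frags, and the frag_list in the same pattern. For reference, a minimal caller could look like the sketch below; the allocation and error handling are illustrative assumptions, not part of this patch:

	/* Hypothetical caller: copy len bytes from a user iovec into a
	 * freshly allocated linear skb.  The iovec is consumed (modified)
	 * by the copy, as the kernel-doc above notes. */
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_put(skb, len);
	if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
		kfree_skb(skb);
		return -EFAULT;
	}
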
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2e..e719ed29310f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
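
The requeue machinery moves out of __netif_schedule() so that net_tx_action() (below) can put a qdisc back on the per-CPU output queue while its __QDISC_STATE_SCHED bit is deliberately left set; the public entry point keeps the test_and_set_bit() guard. For orientation, the per-CPU list being appended to is roughly this (abbreviated from this era's include/linux/netdevice.h; other fields elided):

	struct softnet_data {
		struct Qdisc		*output_queue;	/* qdiscs awaiting the NET_TX
							 * softirq, linked via
							 * Qdisc::next_sched */
		struct sk_buff_head	input_pkt_queue;
		struct sk_buff		*completion_queue;
		/* ... */
	};
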
@@ -1800,9 +1804,13 @@ gso:
 
 	spin_lock(root_lock);
 
-	rc = qdisc_enqueue_root(skb, q);
-	qdisc_run(q);
-
+	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+		kfree_skb(skb);
+		rc = NET_XMIT_DROP;
+	} else {
+		rc = qdisc_enqueue_root(skb, q);
+		qdisc_run(q);
+	}
 	spin_unlock(root_lock);
 
 	goto out;
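
This closes a race with device teardown: the deactivation side sets the bit under the same root lock, so a transmitter that takes the lock after deactivation now drops the packet rather than enqueueing it to a qdisc that has already been reset. A simplified sketch of that other side (from net/sched/sch_generic.c in this series; abbreviated, shown only for context):

	/* dev_deactivate_queue(), roughly: mark and reset under the root
	 * lock that dev_queue_xmit() holds while testing the bit above. */
	spin_lock_bh(qdisc_lock(qdisc));
	if (!(qdisc->flags & TCQ_F_BUILTIN))
		set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
	dev_queue->qdisc = qdisc_default;
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
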
@@ -1974,15 +1982,22 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
 			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state)) {
+					__netif_reschedule(q);
+				} else {
+					smp_mb__before_clear_bit();
+					clear_bit(__QDISC_STATE_SCHED,
+						  &q->state);
+				}
 			}
 		}
 	}
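
Two fixes here: __QDISC_STATE_SCHED is now cleared only after the root lock is taken, so a concurrent __netif_schedule() cannot slip the qdisc back onto the output queue while it is being run; and on trylock failure a deactivated qdisc is no longer rescheduled indefinitely, its SCHED bit is simply cleared and it falls off the list. The state bits involved (this era's include/net/sch_generic.h):

	enum qdisc_state_t {
		__QDISC_STATE_RUNNING,		/* dequeue in progress */
		__QDISC_STATE_SCHED,		/* on a softnet output_queue */
		__QDISC_STATE_DEACTIVATED,	/* device going down */
	};
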
@@ -2084,7 +2099,8 @@ static int ing_filter(struct sk_buff *skb)
 	q = rxq->qdisc;
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
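
The ingress path gets the same guard. Since result is initialized to TC_ACT_OK near the top of ing_filter(), a packet arriving while the ingress qdisc is being deactivated now passes the hook unclassified instead of being enqueued into a dying qdisc; the surrounding declarations (unchanged context, for reference):

	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;		/* returned as-is when the qdisc is deactivated */
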
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index a89f32fa94f6..57abe8266be1 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Protects against NULL dereference and RCU write-side */
+/* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -185,7 +185,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
-	write_lock_bh(&est_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -195,7 +194,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
-	write_unlock_bh(&est_lock);
 	return 0;
 }
 
@@ -214,6 +212,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
  *
+ * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 			struct gnet_stats_rate_est *rate_est)
@@ -227,17 +226,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
 		if (!elist[idx].timer.function)
 			continue;
 
-		write_lock_bh(&est_lock);
 		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
 			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
 
+			write_lock_bh(&est_lock);
 			e->bstats = NULL;
+			write_unlock_bh(&est_lock);
 
 			list_del_rcu(&e->list);
 			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
-		write_unlock_bh(&est_lock);
 	}
 }
 
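
With estimator insertion and removal now serialized by the RTNL mutex (per the NOTE added above), est_lock shrinks to one remaining job: making the e->bstats = NULL store visible atomically to the running timer. The reader side in est_timer() pairs with it roughly like this (simplified sketch of the existing code in this file):

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)		/* killed under write_lock above */
			goto skip;
		/* ... sample e->bstats, update rate averages ... */
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}
	rcu_read_unlock();
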
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65d..ca1ccdf1ef76 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		segs = nskb;
 		tail = nskb;
 
-		nskb->dev = skb->dev;
-		skb_copy_queue_mapping(nskb, skb);
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->vlan_tci = skb->vlan_tci;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
+		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
 		skb_reserve(nskb, headroom);
@@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
 					  doffset);
 		if (!sg) {
+			nskb->ip_summed = CHECKSUM_NONE;
 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
@@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		frag = skb_shinfo(nskb)->frags;
 		k = 0;
 
-		nskb->ip_summed = CHECKSUM_PARTIAL;
-		nskb->csum = skb->csum;
 		skb_copy_from_linear_data_offset(skb, offset,
 						 skb_put(nskb, hsize), hsize);
 
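
The three skbuff.c hunks are one logical change: __copy_skb_header(), a helper in the same file, already copies the header fields, including ip_summed and the checksum start/offset, so the open-coded copies and the CHECKSUM_PARTIAL assignment become redundant. Only the non-scatter/gather path still needs an override, because skb_copy_and_csum_bits() stores a fully computed checksum in nskb->csum and that segment must therefore be CHECKSUM_NONE. An abbreviated sketch of what the helper consolidates (field list shortened; see the full definition in net/core/skbuff.c):

	static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
	{
		new->tstamp		= old->tstamp;
		new->dev		= old->dev;
		new->dst		= dst_clone(old->dst);
		memcpy(new->cb, old->cb, sizeof(old->cb));
		new->csum_start		= old->csum_start;	/* union with csum */
		new->csum_offset	= old->csum_offset;
		new->pkt_type		= old->pkt_type;
		new->ip_summed		= old->ip_summed;	/* why the CHECKSUM_PARTIAL
							 * copy above is gone */
		skb_copy_queue_mapping(new, old);
		new->priority		= old->priority;
		new->protocol		= old->protocol;
		new->vlan_tci		= old->vlan_tci;
		/* ... */
	}
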