diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-05-07 05:17:13 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-07 05:17:34 -0400 |
commit | 44347d947f628060b92449702071bfe1d31dfb75 (patch) | |
tree | c6ed74610d5b3295df4296659f80f5feb94b28cc /net/core | |
parent | d94fc523f3c35bd8013f04827e94756cbc0212f4 (diff) | |
parent | 413f81eba35d6ede9289b0c8a920c013a84fac71 (diff) |
Merge branch 'linus' into tracing/core
Merge reason: tracing/core was on a .30-rc1 base and was missing out on
a handful of tracing fixes present in .30-rc5-almost.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/datagram.c | 14 | ||||
-rw-r--r-- | net/core/dev.c | 31 | ||||
-rw-r--r-- | net/core/skbuff.c | 27 |
3 files changed, 46 insertions, 26 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c index d0de644b378d..b01a76abe1d2 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk) | |||
64 | return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; | 64 | return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; |
65 | } | 65 | } |
66 | 66 | ||
67 | static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, | ||
68 | void *key) | ||
69 | { | ||
70 | unsigned long bits = (unsigned long)key; | ||
71 | |||
72 | /* | ||
73 | * Avoid a wakeup if event not interesting for us | ||
74 | */ | ||
75 | if (bits && !(bits & (POLLIN | POLLERR))) | ||
76 | return 0; | ||
77 | return autoremove_wake_function(wait, mode, sync, key); | ||
78 | } | ||
67 | /* | 79 | /* |
68 | * Wait for a packet.. | 80 | * Wait for a packet.. |
69 | */ | 81 | */ |
70 | static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | 82 | static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) |
71 | { | 83 | { |
72 | int error; | 84 | int error; |
73 | DEFINE_WAIT(wait); | 85 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); |
74 | 86 | ||
75 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 87 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); |
76 | 88 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 91d792d17e09..e2e9e4af3ace 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1336 | { | 1336 | { |
1337 | struct packet_type *ptype; | 1337 | struct packet_type *ptype; |
1338 | 1338 | ||
1339 | #ifdef CONFIG_NET_CLS_ACT | ||
1340 | if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS))) | ||
1341 | net_timestamp(skb); | ||
1342 | #else | ||
1339 | net_timestamp(skb); | 1343 | net_timestamp(skb); |
1344 | #endif | ||
1340 | 1345 | ||
1341 | rcu_read_lock(); | 1346 | rcu_read_lock(); |
1342 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | 1347 | list_for_each_entry_rcu(ptype, &ptype_all, list) { |
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev) | |||
1430 | { | 1435 | { |
1431 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && | 1436 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
1432 | netif_running(dev)) { | 1437 | netif_running(dev)) { |
1433 | netif_stop_queue(dev); | 1438 | netif_tx_stop_all_queues(dev); |
1434 | } | 1439 | } |
1435 | } | 1440 | } |
1436 | EXPORT_SYMBOL(netif_device_detach); | 1441 | EXPORT_SYMBOL(netif_device_detach); |
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev) | |||
1445 | { | 1450 | { |
1446 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && | 1451 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
1447 | netif_running(dev)) { | 1452 | netif_running(dev)) { |
1448 | netif_wake_queue(dev); | 1453 | netif_tx_wake_all_queues(dev); |
1449 | __netdev_watchdog_up(dev); | 1454 | __netdev_watchdog_up(dev); |
1450 | } | 1455 | } |
1451 | } | 1456 | } |
@@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1730 | { | 1735 | { |
1731 | u32 hash; | 1736 | u32 hash; |
1732 | 1737 | ||
1733 | if (skb_rx_queue_recorded(skb)) { | 1738 | if (skb_rx_queue_recorded(skb)) |
1734 | hash = skb_get_rx_queue(skb); | 1739 | return skb_get_rx_queue(skb) % dev->real_num_tx_queues; |
1735 | } else if (skb->sk && skb->sk->sk_hash) { | 1740 | |
1741 | if (skb->sk && skb->sk->sk_hash) | ||
1736 | hash = skb->sk->sk_hash; | 1742 | hash = skb->sk->sk_hash; |
1737 | } else | 1743 | else |
1738 | hash = skb->protocol; | 1744 | hash = skb->protocol; |
1739 | 1745 | ||
1740 | hash = jhash_1word(hash, skb_tx_hashrnd); | 1746 | hash = jhash_1word(hash, skb_tx_hashrnd); |
@@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2328 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 2334 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; |
2329 | int err = -ENOENT; | 2335 | int err = -ENOENT; |
2330 | 2336 | ||
2331 | if (NAPI_GRO_CB(skb)->count == 1) | 2337 | if (NAPI_GRO_CB(skb)->count == 1) { |
2338 | skb_shinfo(skb)->gso_size = 0; | ||
2332 | goto out; | 2339 | goto out; |
2340 | } | ||
2333 | 2341 | ||
2334 | rcu_read_lock(); | 2342 | rcu_read_lock(); |
2335 | list_for_each_entry_rcu(ptype, head, list) { | 2343 | list_for_each_entry_rcu(ptype, head, list) { |
@@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2348 | } | 2356 | } |
2349 | 2357 | ||
2350 | out: | 2358 | out: |
2351 | skb_shinfo(skb)->gso_size = 0; | ||
2352 | return netif_receive_skb(skb); | 2359 | return netif_receive_skb(skb); |
2353 | } | 2360 | } |
2354 | 2361 | ||
@@ -2539,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | |||
2539 | } | 2546 | } |
2540 | 2547 | ||
2541 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); | 2548 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); |
2542 | frag = &info->frags[info->nr_frags - 1]; | 2549 | frag = info->frags; |
2543 | 2550 | ||
2544 | for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) { | 2551 | for (i = 0; i < info->nr_frags; i++) { |
2545 | skb_fill_page_desc(skb, i, frag->page, frag->page_offset, | 2552 | skb_fill_page_desc(skb, i, frag->page, frag->page_offset, |
2546 | frag->size); | 2553 | frag->size); |
2547 | frag++; | 2554 | frag++; |
@@ -4399,7 +4406,7 @@ int register_netdevice(struct net_device *dev) | |||
4399 | dev->iflink = -1; | 4406 | dev->iflink = -1; |
4400 | 4407 | ||
4401 | #ifdef CONFIG_COMPAT_NET_DEV_OPS | 4408 | #ifdef CONFIG_COMPAT_NET_DEV_OPS |
4402 | /* Netdevice_ops API compatiability support. | 4409 | /* Netdevice_ops API compatibility support. |
4403 | * This is temporary until all network devices are converted. | 4410 | * This is temporary until all network devices are converted. |
4404 | */ | 4411 | */ |
4405 | if (dev->netdev_ops) { | 4412 | if (dev->netdev_ops) { |
@@ -4410,7 +4417,7 @@ int register_netdevice(struct net_device *dev) | |||
4410 | dev->name, netdev_drivername(dev, drivername, 64)); | 4417 | dev->name, netdev_drivername(dev, drivername, 64)); |
4411 | 4418 | ||
4412 | /* This works only because net_device_ops and the | 4419 | /* This works only because net_device_ops and the |
4413 | compatiablity structure are the same. */ | 4420 | compatibility structure are the same. */ |
4414 | dev->netdev_ops = (void *) &(dev->init); | 4421 | dev->netdev_ops = (void *) &(dev->init); |
4415 | } | 4422 | } |
4416 | #endif | 4423 | #endif |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 12806b844456..f8bcc06ae8a0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) | |||
1365 | 1365 | ||
1366 | static inline struct page *linear_to_page(struct page *page, unsigned int *len, | 1366 | static inline struct page *linear_to_page(struct page *page, unsigned int *len, |
1367 | unsigned int *offset, | 1367 | unsigned int *offset, |
1368 | struct sk_buff *skb) | 1368 | struct sk_buff *skb, struct sock *sk) |
1369 | { | 1369 | { |
1370 | struct sock *sk = skb->sk; | ||
1371 | struct page *p = sk->sk_sndmsg_page; | 1370 | struct page *p = sk->sk_sndmsg_page; |
1372 | unsigned int off; | 1371 | unsigned int off; |
1373 | 1372 | ||
@@ -1405,13 +1404,14 @@ new_page: | |||
1405 | */ | 1404 | */ |
1406 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | 1405 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, |
1407 | unsigned int *len, unsigned int offset, | 1406 | unsigned int *len, unsigned int offset, |
1408 | struct sk_buff *skb, int linear) | 1407 | struct sk_buff *skb, int linear, |
1408 | struct sock *sk) | ||
1409 | { | 1409 | { |
1410 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) | 1410 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) |
1411 | return 1; | 1411 | return 1; |
1412 | 1412 | ||
1413 | if (linear) { | 1413 | if (linear) { |
1414 | page = linear_to_page(page, len, &offset, skb); | 1414 | page = linear_to_page(page, len, &offset, skb, sk); |
1415 | if (!page) | 1415 | if (!page) |
1416 | return 1; | 1416 | return 1; |
1417 | } else | 1417 | } else |
@@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff, | |||
1442 | static inline int __splice_segment(struct page *page, unsigned int poff, | 1442 | static inline int __splice_segment(struct page *page, unsigned int poff, |
1443 | unsigned int plen, unsigned int *off, | 1443 | unsigned int plen, unsigned int *off, |
1444 | unsigned int *len, struct sk_buff *skb, | 1444 | unsigned int *len, struct sk_buff *skb, |
1445 | struct splice_pipe_desc *spd, int linear) | 1445 | struct splice_pipe_desc *spd, int linear, |
1446 | struct sock *sk) | ||
1446 | { | 1447 | { |
1447 | if (!*len) | 1448 | if (!*len) |
1448 | return 1; | 1449 | return 1; |
@@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1465 | /* the linear region may spread across several pages */ | 1466 | /* the linear region may spread across several pages */ |
1466 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); | 1467 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
1467 | 1468 | ||
1468 | if (spd_fill_page(spd, page, &flen, poff, skb, linear)) | 1469 | if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) |
1469 | return 1; | 1470 | return 1; |
1470 | 1471 | ||
1471 | __segment_seek(&page, &poff, &plen, flen); | 1472 | __segment_seek(&page, &poff, &plen, flen); |
@@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1481 | * pipe is full or if we already spliced the requested length. | 1482 | * pipe is full or if we already spliced the requested length. |
1482 | */ | 1483 | */ |
1483 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | 1484 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, |
1484 | unsigned int *len, | 1485 | unsigned int *len, struct splice_pipe_desc *spd, |
1485 | struct splice_pipe_desc *spd) | 1486 | struct sock *sk) |
1486 | { | 1487 | { |
1487 | int seg; | 1488 | int seg; |
1488 | 1489 | ||
@@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1492 | if (__splice_segment(virt_to_page(skb->data), | 1493 | if (__splice_segment(virt_to_page(skb->data), |
1493 | (unsigned long) skb->data & (PAGE_SIZE - 1), | 1494 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
1494 | skb_headlen(skb), | 1495 | skb_headlen(skb), |
1495 | offset, len, skb, spd, 1)) | 1496 | offset, len, skb, spd, 1, sk)) |
1496 | return 1; | 1497 | return 1; |
1497 | 1498 | ||
1498 | /* | 1499 | /* |
@@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1502 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; | 1503 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
1503 | 1504 | ||
1504 | if (__splice_segment(f->page, f->page_offset, f->size, | 1505 | if (__splice_segment(f->page, f->page_offset, f->size, |
1505 | offset, len, skb, spd, 0)) | 1506 | offset, len, skb, spd, 0, sk)) |
1506 | return 1; | 1507 | return 1; |
1507 | } | 1508 | } |
1508 | 1509 | ||
@@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1528 | .ops = &sock_pipe_buf_ops, | 1529 | .ops = &sock_pipe_buf_ops, |
1529 | .spd_release = sock_spd_release, | 1530 | .spd_release = sock_spd_release, |
1530 | }; | 1531 | }; |
1532 | struct sock *sk = skb->sk; | ||
1531 | 1533 | ||
1532 | /* | 1534 | /* |
1533 | * __skb_splice_bits() only fails if the output has no room left, | 1535 | * __skb_splice_bits() only fails if the output has no room left, |
1534 | * so no point in going over the frag_list for the error case. | 1536 | * so no point in going over the frag_list for the error case. |
1535 | */ | 1537 | */ |
1536 | if (__skb_splice_bits(skb, &offset, &tlen, &spd)) | 1538 | if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) |
1537 | goto done; | 1539 | goto done; |
1538 | else if (!tlen) | 1540 | else if (!tlen) |
1539 | goto done; | 1541 | goto done; |
@@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1545 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1547 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1546 | 1548 | ||
1547 | for (; list && tlen; list = list->next) { | 1549 | for (; list && tlen; list = list->next) { |
1548 | if (__skb_splice_bits(list, &offset, &tlen, &spd)) | 1550 | if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) |
1549 | break; | 1551 | break; |
1550 | } | 1552 | } |
1551 | } | 1553 | } |
1552 | 1554 | ||
1553 | done: | 1555 | done: |
1554 | if (spd.nr_pages) { | 1556 | if (spd.nr_pages) { |
1555 | struct sock *sk = skb->sk; | ||
1556 | int ret; | 1557 | int ret; |
1557 | 1558 | ||
1558 | /* | 1559 | /* |