Diffstat (limited to 'net/core')
 net/core/datagram.c      | 14
 net/core/dev.c           | 36
 net/core/ethtool.c       |  3
 net/core/gen_estimator.c | 13
 net/core/netpoll.c       |  8
 net/core/pktgen.c        |  2
 net/core/skbuff.c        | 31
 7 files changed, 67 insertions(+), 40 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378d..b01a76abe1d2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+				  void *key)
+{
+	unsigned long bits = (unsigned long)key;
+
+	/*
+	 * Avoid a wakeup if event not interesting for us
+	 */
+	if (bits && !(bits & (POLLIN | POLLERR)))
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
 /*
  * Wait for a packet..
  */
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
 	int error;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, receiver_wake_function);
 
 	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
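The new receiver_wake_function() suppresses wakeups whose key carries only events a datagram receiver does not care about (anything other than POLLIN or POLLERR); a zero key carries no event information, so the waiter is woken conservatively. A minimal userspace sketch of that predicate, using the poll bits from <poll.h> (the function name is illustrative, not kernel API):

/* A waiter interested only in POLLIN/POLLERR ignores other wakeups.
 * A zero key means "no event information", so wake conservatively. */
#include <assert.h>
#include <poll.h>
#include <stdbool.h>

static bool receiver_should_wake(unsigned long key_bits)
{
	if (key_bits && !(key_bits & (POLLIN | POLLERR)))
		return false;		/* e.g. a pure POLLOUT event: stay asleep */
	return true;
}

int main(void)
{
	assert(receiver_should_wake(0));	/* no key: wake */
	assert(receiver_should_wake(POLLIN));	/* data arrived */
	assert(receiver_should_wake(POLLERR));	/* error pending */
	assert(!receiver_should_wake(POLLOUT));	/* send-space only */
	return 0;
}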
diff --git a/net/core/dev.c b/net/core/dev.c
index 52fea5b28ca6..e2e9e4af3ace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct packet_type *ptype;
 
+#ifdef CONFIG_NET_CLS_ACT
+	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+		net_timestamp(skb);
+#else
 	net_timestamp(skb);
+#endif
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
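This hunk keeps the original receive timestamp on packets re-injected from the ingress classifier when CONFIG_NET_CLS_ACT is enabled: a packet is stamped unless it already has a timestamp and came from ingress. The double negative is easier to see as a truth table; a hypothetical userspace sketch:

/* Stamp the packet unless it already carries a timestamp *and* came
 * back from the ingress classifier (re-stamping would overwrite the
 * original reception time). */
#include <assert.h>
#include <stdbool.h>

static bool should_timestamp(bool has_tstamp, bool from_ingress)
{
	return !(has_tstamp && from_ingress);
}

int main(void)
{
	assert(should_timestamp(false, false));
	assert(should_timestamp(false, true));
	assert(should_timestamp(true, false));
	assert(!should_timestamp(true, true));	/* keep the original stamp */
	return 0;
}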
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev)
 {
 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
 	    netif_running(dev)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 	}
 }
 EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
 	    netif_running(dev)) {
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 		__netdev_watchdog_up(dev);
 	}
 }
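netif_device_detach()/netif_device_attach() previously stopped and woke only the default TX queue; on multiqueue hardware the remaining queues could keep transmitting against a detached device. The _all_queues helpers iterate every TX queue. A toy model of the difference (toy_dev and toy_tx_stop_all are made-up names):

#include <assert.h>
#include <stdbool.h>

#define TOY_NUM_TXQ 4

struct toy_dev {
	bool txq_stopped[TOY_NUM_TXQ];
};

static void toy_tx_stop_all(struct toy_dev *dev)
{
	for (int i = 0; i < TOY_NUM_TXQ; i++)
		dev->txq_stopped[i] = true;
}

int main(void)
{
	struct toy_dev dev = { { false } };

	dev.txq_stopped[0] = true;	/* old single-queue stop */
	assert(!dev.txq_stopped[3]);	/* queue 3 still live: the bug */

	toy_tx_stop_all(&dev);		/* fixed behaviour */
	assert(dev.txq_stopped[3]);
	return 0;
}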
@@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
 	u32 hash;
 
-	if (skb_rx_queue_recorded(skb)) {
-		hash = skb_get_rx_queue(skb);
-	} else if (skb->sk && skb->sk->sk_hash) {
+	if (skb_rx_queue_recorded(skb))
+		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
-	} else
+	else
 		hash = skb->protocol;
 
 	hash = jhash_1word(hash, skb_tx_hashrnd);
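skb_tx_hash() now maps a recorded RX queue directly onto a TX queue with a modulo, preserving RX/TX queue affinity and guaranteeing an in-range result, instead of feeding the queue number through the hash. A sketch of the selection order, with a placeholder mixer standing in for jhash_1word() (toy_hash and toy_tx_queue are illustrative names):

#include <assert.h>
#include <stdint.h>

static uint32_t toy_hash(uint32_t v)	/* stand-in mixer, not jhash */
{
	v ^= v >> 16;
	v *= 0x45d9f3b;
	return v ^ (v >> 16);
}

static uint16_t toy_tx_queue(int rx_queue, /* -1 if not recorded */
			     uint32_t flow_hash, uint16_t num_txq)
{
	if (rx_queue >= 0)
		return (uint16_t)(rx_queue % num_txq);
	/* scale a 32-bit hash into [0, num_txq) without a modulo bias */
	return (uint16_t)(((uint64_t)toy_hash(flow_hash) * num_txq) >> 32);
}

int main(void)
{
	/* rx queue 5 on a 4-queue tx device folds to queue 1 */
	assert(toy_tx_queue(5, 0, 4) == 1);
	/* unrecorded traffic spreads by hash, always in range */
	assert(toy_tx_queue(-1, 0xdeadbeef, 4) < 4);
	return 0;
}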
@@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb)
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int err = -ENOENT;
 
-	if (NAPI_GRO_CB(skb)->count == 1)
+	if (NAPI_GRO_CB(skb)->count == 1) {
+		skb_shinfo(skb)->gso_size = 0;
 		goto out;
+	}
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
-	skb_shinfo(skb)->gso_size = 0;
 	return netif_receive_skb(skb);
 }
 
@@ -2472,8 +2479,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		return GRO_NORMAL;
 
 	for (p = napi->gro_list; p; p = p->next) {
-		NAPI_GRO_CB(p)->same_flow = !compare_ether_header(
-			skb_mac_header(p), skb_gro_mac_header(skb));
+		NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
+			&& !compare_ether_header(skb_mac_header(p),
+						 skb_gro_mac_header(skb));
 		NAPI_GRO_CB(p)->flush = 0;
 	}
 
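GRO previously matched flows on the Ethernet header alone, so packets arriving on different devices with identical headers could be merged into one super-packet. The fix requires the held packet and the new one to share a device as well. A toy illustration of the tightened match:

#include <assert.h>
#include <stdbool.h>
#include <string.h>

struct toy_pkt {
	int dev_id;
	unsigned char eth_hdr[14];	/* dst(6) + src(6) + proto(2) */
};

static bool same_flow(const struct toy_pkt *a, const struct toy_pkt *b)
{
	return a->dev_id == b->dev_id &&
	       memcmp(a->eth_hdr, b->eth_hdr, sizeof(a->eth_hdr)) == 0;
}

int main(void)
{
	struct toy_pkt p1 = { .dev_id = 0 }, p2 = { .dev_id = 1 };

	memset(p1.eth_hdr, 0xab, sizeof(p1.eth_hdr));
	memcpy(p2.eth_hdr, p1.eth_hdr, sizeof(p2.eth_hdr));

	assert(!same_flow(&p1, &p2));	/* same header, different device */
	p2.dev_id = 0;
	assert(same_flow(&p1, &p2));
	return 0;
}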
@@ -2538,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 	}
 
 	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-	frag = &info->frags[info->nr_frags - 1];
+	frag = info->frags;
 
-	for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
+	for (i = 0; i < info->nr_frags; i++) {
 		skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
 				   frag->size);
 		frag++;
@@ -4398,7 +4406,7 @@ int register_netdevice(struct net_device *dev)
 	dev->iflink = -1;
 
 #ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatiability support.
+	/* Netdevice_ops API compatibility support.
 	 * This is temporary until all network devices are converted.
 	 */
 	if (dev->netdev_ops) {
@@ -4409,7 +4417,7 @@ int register_netdevice(struct net_device *dev)
 			dev->name, netdev_drivername(dev, drivername, 64));
 
 		/* This works only because net_device_ops and the
-		   compatiablity structure are the same. */
+		   compatibility structure are the same. */
 		dev->netdev_ops = (void *) &(dev->init);
 	}
 #endif
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 244ca56dffac..d9d5160610d5 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -261,8 +261,7 @@ static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 	ret = 0;
 
 err_out:
-	if (rule_buf)
-		kfree(rule_buf);
+	kfree(rule_buf);
 
 	return ret;
 }
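kfree(NULL) is defined to be a no-op, exactly like free(NULL) in userspace, so the NULL guard was dead weight on the error path. The same pattern in standard C:

/* free(NULL) is a defined no-op (C standard), so error paths need no
 * NULL guard either. */
#include <stdlib.h>

int main(void)
{
	char *rule_buf = NULL;

	/* ... allocation may or may not have happened ... */
	free(rule_buf);		/* safe even when rule_buf is NULL */
	return 0;
}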
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
 	spinlock_t		*stats_lock;
 	int			ewma_log;
 	u64			last_bytes;
+	u64			avbps;
 	u32			last_packets;
 	u32			avpps;
-	u32			avbps;
 	struct rcu_head		e_rcu;
 	struct rb_node		node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
+		u64 brate;
 		u32 npackets;
 		u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
-		rate = (nbytes - e->last_bytes)<<(7 - idx);
+		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
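The estimator kept avbps in a u32, but the scaled byte count of a fast link no longer fits in 32 bits, so the EWMA input wrapped. Widening to u64 with a signed-shift update fixes it. A standalone demonstration under the same scaling assumptions (brate = bytes-per-interval << 7, ewma_log = 5, a ~10 Gbit/s link sampled four times a second):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ~10 Gbit/s: 1.25e9 bytes/sec, sampled every 1/4 sec */
	uint64_t bytes_per_interval = 1250000000ull / 4;
	uint64_t brate64 = bytes_per_interval << 7;		 /* fits in u64 */
	uint32_t brate32 = (uint32_t)(bytes_per_interval << 7);  /* wraps */

	uint64_t avbps = 0;
	int ewma_log = 5;

	/* the fixed update: signed difference, then arithmetic shift */
	for (int i = 0; i < 1000; i++)
		avbps += (int64_t)(brate64 - avbps) >> ewma_log;

	printf("u32 sample wrapped to %" PRIu32 " (true value %" PRIu64 ")\n",
	       brate32, brate64);
	printf("u64 EWMA converges to ~%" PRIu64 "\n", avbps);
	return 0;
}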
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
-	const struct net_device_ops *ops = dev->netdev_ops;
+	const struct net_device_ops *ops;
+
+	if (!dev || !netif_running(dev))
+		return;
 
-	if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
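The netpoll change is an ordering fix: initializing ops from dev->netdev_ops in the declaration dereferences dev before the !dev test can run, so a NULL np->dev crashed ahead of its own guard. The fix delays the load until dev has been validated. The shape of the fix, with hypothetical toy types:

#include <stdio.h>

struct toy_ops { void (*poll)(void); };
struct toy_dev { struct toy_ops *ops; };

static void poll_fixed(struct toy_dev *dev)
{
	struct toy_ops *ops;

	if (!dev)		/* validate first */
		return;

	ops = dev->ops;		/* only now is this dereference safe */
	if (!ops || !ops->poll)
		return;

	ops->poll();
}

int main(void)
{
	poll_fixed(NULL);	/* safe; the pre-fix ordering would crash */
	printf("no crash\n");
	return 0;
}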
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11..0666a827bc62 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
 		int i = 0;
-		for (; i < pkt_dev->nflows; i++){
+		for (; i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
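pkt_dev->flows is allocated with cflows entries, so releasing per-flow xfrm state must walk all cflows slots; nflows tracks flows currently in play and can differ from the array size, which could leave SA references behind. A generic userspace rendering of "release by allocation size, not by a usage counter" (all names illustrative):

#include <assert.h>
#include <stdlib.h>

struct flow { void *state; };

static int freed;

static void put_state(void *s) { (void)s; freed++; }

int main(void)
{
	int cflows = 8;			/* array size at allocation */
	int nflows = 3;			/* currently-active count */
	struct flow *flows = calloc(cflows, sizeof(*flows));

	for (int i = 0; i < cflows; i++)
		flows[i].state = &flows[i];	/* pretend each holds an SA */

	(void)nflows;
	for (int i = 0; i < cflows; i++)	/* was: i < nflows (leak) */
		if (flows[i].state)
			put_state(flows[i].state);

	assert(freed == cflows);
	free(flows);
	return 0;
}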
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce6356cd9f71..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->gso_segs = 0;
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
+	shinfo->tx_flags.flags = 0;
 	shinfo->frag_list = NULL;
+	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->data = skb->head + NET_SKB_PAD;
@@ -1365,9 +1367,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
 					  unsigned int *offset,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb, struct sock *sk)
 {
-	struct sock *sk = skb->sk;
 	struct page *p = sk->sk_sndmsg_page;
 	unsigned int off;
 
@@ -1405,13 +1406,14 @@ new_page:
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
 				unsigned int *len, unsigned int offset,
-				struct sk_buff *skb, int linear)
+				struct sk_buff *skb, int linear,
+				struct sock *sk)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
 	if (linear) {
-		page = linear_to_page(page, len, &offset, skb);
+		page = linear_to_page(page, len, &offset, skb, sk);
 		if (!page)
 			return 1;
 	} else
@@ -1442,7 +1444,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd, int linear)
+				   struct splice_pipe_desc *spd, int linear,
+				   struct sock *sk)
 {
 	if (!*len)
 		return 1;
@@ -1465,7 +1468,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1484,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * pipe is full or if we already spliced the requested length.
  */
 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-			     unsigned int *len,
-			     struct splice_pipe_desc *spd)
+			     unsigned int *len, struct splice_pipe_desc *spd,
+			     struct sock *sk)
 {
 	int seg;
 
@@ -1492,7 +1495,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1))
+			     offset, len, skb, spd, 1, sk))
 		return 1;
 
 	/*
@@ -1502,7 +1505,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd, 0))
+				     offset, len, skb, spd, 0, sk))
 			return 1;
 	}
 
@@ -1528,12 +1531,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
+	struct sock *sk = skb->sk;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
 	 */
-	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
 		goto done;
 	else if (!tlen)
 		goto done;
@@ -1545,14 +1549,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list && tlen; list = list->next) {
-			if (__skb_splice_bits(list, &offset, &tlen, &spd))
+			if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
 				break;
 		}
 	}
 
 done:
 	if (spd.nr_pages) {
-		struct sock *sk = skb->sk;
 		int ret;
 
 		/*
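The splice hunks all thread one extra parameter: the socket that owns the head skb. linear_to_page() used to fetch the socket from whatever skb it was copying, but when skb_splice_bits() descends into the frag_list the fragment skbs need not carry the owning socket, so that lookup was unsafe. A toy sketch of the pattern (illustrative names, not the kernel structures):

#include <assert.h>
#include <stddef.h>

struct toy_sock { int id; };
struct toy_skb {
	struct toy_sock *sk;		/* may be NULL on frag_list members */
	struct toy_skb *frag_list;
};

/* fixed: the context is supplied by the caller that owns the head skb */
static int fill_page(struct toy_skb *skb, struct toy_sock *sk)
{
	(void)skb;
	return sk ? sk->id : -1;	/* pre-fix code read skb->sk here */
}

int main(void)
{
	struct toy_sock sock = { .id = 42 };
	struct toy_skb frag = { .sk = NULL, .frag_list = NULL };
	struct toy_skb head = { .sk = &sock, .frag_list = &frag };

	/* the walk visits head then frag, always passing head's socket */
	for (struct toy_skb *p = &head; p; p = p->frag_list)
		assert(fill_page(p, head.sk) == 42);
	return 0;
}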
@@ -2285,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
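The skb_seq_read() guard appears to keep the iterator off the linear-data fast path while st->frag_data still holds a mapped fragment from an earlier call; falling through lets the fragment path finish and release the mapping first. An abstract toy model of that invariant (the real code kunmaps st->frag_data; this shows only the control-flow shape):

#include <assert.h>
#include <stdbool.h>

struct toy_iter {
	bool frag_mapped;	/* models st->frag_data != NULL */
};

static int read_step(struct toy_iter *st, bool linear_has_bytes)
{
	if (linear_has_bytes && !st->frag_mapped)	/* the added check */
		return 1;		/* safe linear fast path */
	if (st->frag_mapped) {
		st->frag_mapped = false;	/* release before moving on */
		return 2;		/* finish the fragment */
	}
	return 0;
}

int main(void)
{
	struct toy_iter st = { .frag_mapped = true };

	/* with the fix, the held mapping is drained first ... */
	assert(read_step(&st, true) == 2);
	/* ... and only then does the linear path serve bytes */
	assert(read_step(&st, true) == 1);
	return 0;
}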