Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c             28
-rw-r--r--  net/core/fib_rules.c        7
-rw-r--r--  net/core/flow_dissector.c  20
-rw-r--r--  net/core/neighbour.c        8
-rw-r--r--  net/core/netpoll.c          6
-rw-r--r--  net/core/rtnetlink.c       31
-rw-r--r--  net/core/skbuff.c         103
-rw-r--r--  net/core/sock.c            11
8 files changed, 115 insertions, 99 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
 *	2. No high memory really exists on this machine.
 */
 
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-	netdev_features_t features)
+	const struct net_device *dev,
+	netdev_features_t features)
 {
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(skb->dev, skb)) {
+	} else if (illegal_highdma(dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = skb->dev->features;
+	netdev_features_t features = dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, features);
+		return harmonize_features(skb, dev, features);
 	}
 
-	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 		     NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 			NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 			NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, features);
+	return harmonize_features(skb, dev, features);
 }
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
@@ -2803,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
-int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -4637,7 +4639,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
-int netdev_adjacent_sysfs_add(struct net_device *dev,
+static int netdev_adjacent_sysfs_add(struct net_device *dev,
 			      struct net_device *adj_dev,
 			      struct list_head *dev_list)
 {
@@ -4647,7 +4649,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
 				 linkname);
 }
-void netdev_adjacent_sysfs_del(struct net_device *dev,
+static void netdev_adjacent_sysfs_del(struct net_device *dev,
 			       char *name,
 			       struct list_head *dev_list)
 {
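
The dev.c hunks let callers compute offload features against a device other than skb->dev, which matters when validating a packet for an egress device before skb->dev has been rewritten (e.g. in the forwarding path). The old entry point presumably survives as a thin wrapper; a minimal sketch, assuming the declaration sits in include/linux/netdevice.h next to netif_skb_dev_features():

static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	/* delegate to the dev-aware variant, using the skb's own device */
	return netif_skb_dev_features(skb, skb->dev);
}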
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
 		attach_rules(&ops->rules_list, dev);
 		break;
 
+	case NETDEV_CHANGENAME:
+		list_for_each_entry(ops, &net->rules_ops, list) {
+			detach_rules(&ops->rules_list, dev);
+			attach_rules(&ops->rules_list, dev);
+		}
+		break;
+
 	case NETDEV_UNREGISTER:
 		list_for_each_entry(ops, &net->rules_ops, list)
 			detach_rules(&ops->rules_list, dev);
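
The new NETDEV_CHANGENAME case re-runs detach/attach so that rules which match an interface by name track a rename. For orientation, attach_rules() is assumed to resolve name matches into cached ifindex values, roughly:

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		/* re-resolve rules that reference this device by name */
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

detach_rules() is the inverse, resetting the cached index to -1, so the detach+attach pair rebinds rules whose name now matches (or no longer matches) the renamed device.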
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
 	return poff;
 }
 
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
-	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
-				     dev->name, queue_index,
-				     dev->real_num_tx_queues);
-		return 0;
-	}
-	return queue_index;
-}
-
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
 	return queue_index;
 }
-EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb,
-							    accel_priv);
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
 
 		if (!accel_priv)
-			queue_index = dev_cap_txqueue(dev, queue_index);
+			queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
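
These hunks change the ndo_select_queue contract: drivers now receive accel_priv plus the core's generic selector as a fallback argument, which is why __netdev_pick_tx() can become static and lose its export. The queue clamp also moves out of this file under the name netdev_cap_txqueue() (its definition is not shown in this diff). A hedged sketch of a driver callback under the new four-argument shape (example_select_queue is hypothetical, and select_queue_fallback_t is assumed to match the kernel's typedef for the fallback pointer):

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	/* special-case accelerated sub-devices, else defer to the core */
	if (accel_priv)
		return 0;
	return fallback(dev, skb);
}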
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b9e9e0d38672..e16129019c66 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -766,9 +766,6 @@ static void neigh_periodic_work(struct work_struct *work)
 	nht = rcu_dereference_protected(tbl->nht,
 					lockdep_is_held(&tbl->lock));
 
-	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
-		goto out;
-
 	/*
 	 *	periodically recompute ReachableTime from random function
 	 */
@@ -781,6 +778,9 @@ static void neigh_periodic_work(struct work_struct *work)
 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 	}
 
+	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+		goto out;
+
 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
 		np = &nht->hash_buckets[i];
 
@@ -3046,7 +3046,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 	if (!t)
 		goto err;
 
-	for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
 		t->neigh_vars[i].data += (long) p;
 		t->neigh_vars[i].extra1 = dev;
 		t->neigh_vars[i].extra2 = p;
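
Two independent fixes: neigh_periodic_work() now recomputes ReachableTime even when the table holds fewer than gc_thresh1 entries (only the bucket scan is skipped), and neigh_sysctl_register() stops offsetting sysctl data pointers at NEIGH_VAR_GC_INTERVAL. The tighter bound works because the neigh_vars array is assumed to be laid out with the per-device knobs first and the table-wide GC knobs last, roughly:

enum {
	NEIGH_VAR_MCAST_PROBES,	/* per-device entries ... */
	/* ... */
	NEIGH_VAR_GC_INTERVAL,	/* first table-wide entry */
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

The GC entries point into struct neigh_table rather than struct neigh_parms, so offsetting their .data by the parms pointer p would corrupt them.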
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..df9e6b1a9759 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
 	struct nd_msg *msg;
 	struct ipv6hdr *hdr;
 
-	if (skb->protocol != htons(ETH_P_ARP))
+	if (skb->protocol != htons(ETH_P_IPV6))
 		return false;
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
 		return false;
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 {
 	char *cur=opt, *delim;
 	int ipv6;
+	bool ipversion_set = false;
 
 	if (*cur != '@') {
 		if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	cur++;
 
 	if (*cur != '/') {
+		ipversion_set = true;
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
 		*delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
 	if (ipv6 < 0)
 		goto parse_failed;
-	else if (np->ipv6 != (bool)ipv6)
+	else if (ipversion_set && np->ipv6 != (bool)ipv6)
 		goto parse_failed;
 	else
 		np->ipv6 = (bool)ipv6;
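
Two fixes here: pkt_is_ns() compared against ETH_P_ARP where it meant ETH_P_IPV6 (neighbour solicitations are IPv6 packets), and netpoll_parse_options() rejected valid configs that omit the local IP. The parser consumes the usual netconsole-style string:

[src_port]@[src_ip]/[dev],[tgt_port]@<tgt_ip>/[tgt_mac]

for example netconsole=4444@10.0.0.1/eth0,6666@10.0.0.2/aa:bb:cc:dd:ee:ff (illustrative values). When [src_ip] is omitted, np->ipv6 has not been set by the time the target address is parsed, so the old np->ipv6 != (bool)ipv6 consistency check could fail spuriously; the new ipversion_set flag makes the check apply only once a local address has pinned the IP version.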
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..120eecc0f5a4 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
 	if (!master_dev)
 		return 0;
 	ops = master_dev->rtnl_link_ops;
-	if (!ops->get_slave_size)
+	if (!ops || !ops->get_slave_size)
 		return 0;
 	/* IFLA_INFO_SLAVE_DATA + nested data */
 	return nla_total_size(sizeof(struct nlattr)) +
@@ -1963,16 +1963,21 @@ replay:
 
 	dev->ifindex = ifm->ifi_index;
 
-	if (ops->newlink)
+	if (ops->newlink) {
 		err = ops->newlink(net, dev, tb, data);
-	else
+		/* Drivers should call free_netdev() in ->destructor
+		 * and unregister it on failure so that device could be
+		 * finally freed in rtnl_unlock.
+		 */
+		if (err < 0)
+			goto out;
+	} else {
 		err = register_netdevice(dev);
-
-	if (err < 0) {
-		free_netdev(dev);
-		goto out;
+		if (err < 0) {
+			free_netdev(dev);
+			goto out;
+		}
 	}
-
 	err = rtnl_configure_link(dev, ifm);
 	if (err < 0)
 		unregister_netdevice(dev);
@@ -2116,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
 				   struct net_device *dev,
 				   u8 *addr, u32 pid, u32 seq,
-				   int type, unsigned int flags)
+				   int type, unsigned int flags,
+				   int nlflags)
 {
 	struct nlmsghdr *nlh;
 	struct ndmsg *ndm;
 
-	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
 	if (!nlh)
 		return -EMSGSIZE;
 
@@ -2159,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
 	if (!skb)
 		goto errout;
 
-	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto errout;
@@ -2384,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
 
 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
 					      portid, seq,
-					      RTM_NEWNEIGH, NTF_SELF);
+					      RTM_NEWNEIGH, NTF_SELF,
+					      NLM_F_MULTI);
 		if (err < 0)
 			return err;
 skip:
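
Three distinct fixes: rtnl_link_get_slave_info_data_size() now tolerates a master device without rtnl_link_ops; the newlink path no longer calls free_netdev() after a driver's ->newlink() fails (the driver's ->destructor is expected to free the device once rtnl_unlock() runs the pending unregister); and fdb messages now carry NLM_F_MULTI only when they are really part of a multipart dump. A hedged sketch of the userspace expectation being honoured (read_multipart() and handle_msg() are hypothetical; the NLMSG_* macros come from linux/netlink.h):

#include <linux/netlink.h>

static void handle_msg(struct nlmsghdr *nlh);	/* hypothetical consumer */

static void read_multipart(struct nlmsghdr *nlh, int len)
{
	for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;		/* NLMSG_DONE terminates a dump */
		handle_msg(nlh);
		if (!(nlh->nlmsg_flags & NLM_F_MULTI))
			break;		/* standalone notification: no DONE follows */
	}
}

A standalone RTM_NEWNEIGH notification flagged NLM_F_MULTI would leave such a reader waiting for an NLMSG_DONE that never arrives.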
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5976ef0846bd..869c7afe3b07 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mark	= old->mark;
 	new->skb_iif	= old->skb_iif;
 	__nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-	new->nf_trace	= old->nf_trace;
-#endif
 #ifdef CONFIG_NET_SCHED
 	new->tc_index	= old->tc_index;
 #ifdef CONFIG_NET_CLS_ACT
@@ -2841,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
 /**
 *	skb_segment - Perform protocol segmentation on skb.
- *	@skb: buffer to segment
+ *	@head_skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb. It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */
-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *skb_segment(struct sk_buff *head_skb,
+			    netdev_features_t features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
-	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
-	skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
-	unsigned int mss = skb_shinfo(skb)->gso_size;
-	unsigned int doffset = skb->data - skb_mac_header(skb);
+	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+	unsigned int mss = skb_shinfo(head_skb)->gso_size;
+	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+	struct sk_buff *frag_skb = head_skb;
 	unsigned int offset = doffset;
-	unsigned int tnl_hlen = skb_tnl_header_len(skb);
+	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
 	unsigned int headroom;
 	unsigned int len;
 	__be16 proto;
 	bool csum;
 	int sg = !!(features & NETIF_F_SG);
-	int nfrags = skb_shinfo(skb)->nr_frags;
+	int nfrags = skb_shinfo(head_skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
 	int pos;
 
-	proto = skb_network_protocol(skb);
+	proto = skb_network_protocol(head_skb);
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
 	csum = !!can_checksum_protocol(features, proto);
-	__skb_push(skb, doffset);
-	headroom = skb_headroom(skb);
-	pos = skb_headlen(skb);
+	__skb_push(head_skb, doffset);
+	headroom = skb_headroom(head_skb);
+	pos = skb_headlen(head_skb);
 
 	do {
 		struct sk_buff *nskb;
-		skb_frag_t *frag;
+		skb_frag_t *nskb_frag;
 		int hsize;
 		int size;
 
-		len = skb->len - offset;
+		len = head_skb->len - offset;
 		if (len > mss)
 			len = mss;
 
-		hsize = skb_headlen(skb) - offset;
+		hsize = skb_headlen(head_skb) - offset;
 		if (hsize < 0)
 			hsize = 0;
 		if (hsize > len || !sg)
 			hsize = len;
 
-		if (!hsize && i >= nfrags && skb_headlen(fskb) &&
-		    (skb_headlen(fskb) == len || sg)) {
-			BUG_ON(skb_headlen(fskb) > len);
+		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
+		    (skb_headlen(list_skb) == len || sg)) {
+			BUG_ON(skb_headlen(list_skb) > len);
 
 			i = 0;
-			nfrags = skb_shinfo(fskb)->nr_frags;
-			skb_frag = skb_shinfo(fskb)->frags;
-			pos += skb_headlen(fskb);
+			nfrags = skb_shinfo(list_skb)->nr_frags;
+			frag = skb_shinfo(list_skb)->frags;
+			frag_skb = list_skb;
+			pos += skb_headlen(list_skb);
 
 			while (pos < offset + len) {
 				BUG_ON(i >= nfrags);
 
-				size = skb_frag_size(skb_frag);
+				size = skb_frag_size(frag);
 				if (pos + size > offset + len)
 					break;
 
 				i++;
 				pos += size;
-				skb_frag++;
+				frag++;
 			}
 
-			nskb = skb_clone(fskb, GFP_ATOMIC);
-			fskb = fskb->next;
+			nskb = skb_clone(list_skb, GFP_ATOMIC);
+			list_skb = list_skb->next;
 
 			if (unlikely(!nskb))
 				goto err;
@@ -2936,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 			__skb_push(nskb, doffset);
 		} else {
 			nskb = __alloc_skb(hsize + doffset + headroom,
-					   GFP_ATOMIC, skb_alloc_rx_flag(skb),
+					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
 					   NUMA_NO_NODE);
 
 			if (unlikely(!nskb))
@@ -2952,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 			segs = nskb;
 		tail = nskb;
 
-		__copy_skb_header(nskb, skb);
-		nskb->mac_len = skb->mac_len;
+		__copy_skb_header(nskb, head_skb);
+		nskb->mac_len = head_skb->mac_len;
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
-		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
 						 doffset + tnl_hlen);
 
@@ -2966,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 
 		if (!sg) {
 			nskb->ip_summed = CHECKSUM_NONE;
-			nskb->csum = skb_copy_and_csum_bits(skb, offset,
+			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
 			continue;
 		}
 
-		frag = skb_shinfo(nskb)->frags;
+		nskb_frag = skb_shinfo(nskb)->frags;
 
-		skb_copy_from_linear_data_offset(skb, offset,
+		skb_copy_from_linear_data_offset(head_skb, offset,
 						 skb_put(nskb, hsize), hsize);
 
-		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
+					     SKBTX_SHARED_FRAG;
 
 		while (pos < offset + len) {
 			if (i >= nfrags) {
-				BUG_ON(skb_headlen(fskb));
+				BUG_ON(skb_headlen(list_skb));
 
 				i = 0;
-				nfrags = skb_shinfo(fskb)->nr_frags;
-				skb_frag = skb_shinfo(fskb)->frags;
+				nfrags = skb_shinfo(list_skb)->nr_frags;
+				frag = skb_shinfo(list_skb)->frags;
+				frag_skb = list_skb;
 
 				BUG_ON(!nfrags);
 
-				fskb = fskb->next;
+				list_skb = list_skb->next;
 			}
 
 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
@@ -3000,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 				goto err;
 			}
 
-			*frag = *skb_frag;
-			__skb_frag_ref(frag);
-			size = skb_frag_size(frag);
+			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
+				goto err;
+
+			*nskb_frag = *frag;
+			__skb_frag_ref(nskb_frag);
+			size = skb_frag_size(nskb_frag);
 
 			if (pos < offset) {
-				frag->page_offset += offset - pos;
-				skb_frag_size_sub(frag, offset - pos);
+				nskb_frag->page_offset += offset - pos;
+				skb_frag_size_sub(nskb_frag, offset - pos);
 			}
 
 			skb_shinfo(nskb)->nr_frags++;
 
 			if (pos + size <= offset + len) {
 				i++;
-				skb_frag++;
+				frag++;
 				pos += size;
 			} else {
-				skb_frag_size_sub(frag, pos + size - (offset + len));
+				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
 				goto skip_fraglist;
 			}
 
-			frag++;
+			nskb_frag++;
 		}
 
 skip_fraglist:
@@ -3034,7 +3039,7 @@ perform_csum_check:
 						  nskb->len - doffset, 0);
 			nskb->ip_summed = CHECKSUM_NONE;
 		}
-	} while ((offset += len) < skb->len);
+	} while ((offset += len) < head_skb->len);
 
 	return segs;
 
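
Besides renaming skb to head_skb (and fskb to list_skb) for readability, skb_segment() now tracks frag_skb, the skb whose frags[] array is currently being walked, and calls skb_orphan_frags() on it before taking page references. That matters when the pages are zero-copy buffers still owned by userspace: they must be copied before the segments start sharing them, and the owning skb may be a frag_list member rather than the head. The helper is assumed to be the usual skbuff.h inline, roughly:

static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	/* only zero-copy (userspace-owned) pages need work */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	/* replace the userspace pages with kernel-owned copies */
	return skb_copy_ubufs(skb, gfp_mask);
}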
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..c0fc6bdad1e3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	while (order) {
 		if (npages >= 1 << order) {
 			page = alloc_pages(sk->sk_allocation |
-					   __GFP_COMP | __GFP_NOWARN,
+					   __GFP_COMP |
+					   __GFP_NOWARN |
+					   __GFP_NORETRY,
 					   order);
 			if (page)
 				goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 	gfp_t gfp = prio;
 
 	if (order)
-		gfp |= __GFP_COMP | __GFP_NOWARN;
+		gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
 	pfrag->page = alloc_pages(gfp, order);
 	if (likely(pfrag->page)) {
 		pfrag->offset = 0;
@@ -2355,10 +2357,13 @@ void release_sock(struct sock *sk)
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
 
+	/* Warning : release_cb() might need to release sk ownership,
+	 * ie call sock_release_ownership(sk) before us.
+	 */
 	if (sk->sk_prot->release_cb)
 		sk->sk_prot->release_cb(sk);
 
-	sk->sk_lock.owned = 0;
+	sock_release_ownership(sk);
 	if (waitqueue_active(&sk->sk_lock.wq))
 		wake_up(&sk->sk_lock.wq);
 	spin_unlock_bh(&sk->sk_lock.slock);
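
Two changes: the high-order page allocations in sock_alloc_send_pskb() and skb_page_frag_refill() now pass __GFP_NORETRY, so under memory pressure they fail fast and the surrounding loops fall back to a smaller order instead of pushing reclaim toward the OOM killer; and release_sock() now releases lock ownership through a helper, since a protocol's release_cb() may need to release ownership itself before release_sock() finishes (as the new comment warns). The helper is assumed to be a trivial inline in include/net/sock.h:

static inline void sock_release_ownership(struct sock *sk)
{
	/* drop the "owned by user context" marker under sk_lock */
	sk->sk_lock.owned = 0;
}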