Diffstat (limited to 'net')
-rw-r--r--  net/core/datagram.c     |  6
-rw-r--r--  net/core/neighbour.c    |  1
-rw-r--r--  net/core/rtnetlink.c    | 26
-rw-r--r--  net/core/sock.c         | 33
-rw-r--r--  net/ipv4/ipmr.c         |  2
-rw-r--r--  net/ipv4/udp.c          | 14
-rw-r--r--  net/ipv6/ip6_output.c   |  2
-rw-r--r--  net/ipv6/ip6mr.c        |  2
-rw-r--r--  net/ipv6/udp.c          |  5
-rw-r--r--  net/iucv/af_iucv.c      |  2
-rw-r--r--  net/netfilter/xt_TEE.c  |  4
11 files changed, 70 insertions, 27 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+        bool slow;
+
         if (likely(atomic_read(&skb->users) == 1))
                 smp_rmb();
         else if (likely(!atomic_dec_and_test(&skb->users)))
                 return;
 
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         skb_orphan(skb);
         sk_mem_reclaim_partial(sk);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         /* skb is now orphaned, can be freed outside of locked section */
         __kfree_skb(skb);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
                                 kfree_skb(buff);
                                 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                         }
+                        skb_dst_force(skb);
                         __skb_queue_tail(&neigh->arp_queue, skb);
                 }
                 rc = 1;
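
The skb_dst_force() call added above pins the packet's route before the skb is parked on neigh->arp_queue: once an skb is queued past the current softirq/RCU section, a borrowed (noref) dst could be freed underneath it, so it must be upgraded to a real reference first. A minimal sketch of that rule, assuming a generic queue (park_skb() and its parameters are illustrative, not from this patch):

        /* Before parking an skb on a queue that may outlive the current
         * softirq/RCU section, upgrade a borrowed (noref) dst to a held
         * reference so the route cannot be freed underneath it. */
        static void park_skb(struct sk_buff_head *q, struct sk_buff *skb)
        {
                skb_dst_force(skb);             /* take a refcount if the skb only borrowed its dst */
                __skb_queue_tail(q, skb);       /* caller is assumed to hold the queue lock */
        }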
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ab86f3a1ea4..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
         if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
 
                 int num_vfs = dev_num_vf(dev->dev.parent);
-                size_t size = nlmsg_total_size(sizeof(struct nlattr));
-                size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
-                size += num_vfs * (sizeof(struct ifla_vf_mac) +
-                                   sizeof(struct ifla_vf_vlan) +
-                                   sizeof(struct ifla_vf_tx_rate));
+                size_t size = nla_total_size(sizeof(struct nlattr));
+                size += nla_total_size(num_vfs * sizeof(struct nlattr));
+                size += num_vfs *
+                        (nla_total_size(sizeof(struct ifla_vf_mac)) +
+                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
+                         nla_total_size(sizeof(struct ifla_vf_tx_rate)));
                 return size;
         } else
                 return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 
         for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
                 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
-                if (!vf_port) {
-                        nla_nest_cancel(skb, vf_ports);
-                        return -EMSGSIZE;
-                }
+                if (!vf_port)
+                        goto nla_put_failure;
                 NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
                 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+                if (err == -EMSGSIZE)
+                        goto nla_put_failure;
                 if (err) {
-nla_put_failure:
                         nla_nest_cancel(skb, vf_port);
                         continue;
                 }
@@ -739,6 +739,10 @@ nla_put_failure:
         nla_nest_end(skb, vf_ports);
 
         return 0;
+
+nla_put_failure:
+        nla_nest_cancel(skb, vf_ports);
+        return -EMSGSIZE;
 }
 
 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
         err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
         if (err) {
                 nla_nest_cancel(skb, port_self);
-                return err;
+                return (err == -EMSGSIZE) ? err : 0;
         }
 
         nla_nest_end(skb, port_self);
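
The rtnl_vf_ports_fill() rework above separates two failure modes: running out of message space (nla_nest_start() failing, or the driver returning -EMSGSIZE) cancels the whole IFLA_VF_PORTS nest and propagates -EMSGSIZE so the dump can be retried with a larger buffer, while any other driver error only cancels the inner IFLA_VF_PORT nest and continues with the next VF. A minimal sketch of that netlink nesting idiom, under a simplified fill function (fill_ports() and its loop body are illustrative, not the patch's code):

        static int fill_ports(struct sk_buff *skb, int nports)
        {
                struct nlattr *ports, *port;
                int vf;

                ports = nla_nest_start(skb, IFLA_VF_PORTS);
                if (!ports)
                        return -EMSGSIZE;

                for (vf = 0; vf < nports; vf++) {
                        port = nla_nest_start(skb, IFLA_VF_PORT);
                        if (!port)
                                goto nla_put_failure;   /* out of skb space */
                        if (nla_put_u32(skb, IFLA_PORT_VF, vf))
                                goto nla_put_failure;
                        nla_nest_end(skb, port);        /* commit this entry */
                }

                nla_nest_end(skb, ports);
                return 0;

        nla_put_failure:
                nla_nest_cancel(skb, ports);            /* drop the partial nest */
                return -EMSGSIZE;
        }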
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6adade..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken
+ *  sk_lock.slock locked, owned = 0, BH disabled
+ * return true if slow path is taken
+ *  sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+        might_sleep();
+        spin_lock_bh(&sk->sk_lock.slock);
+
+        if (!sk->sk_lock.owned)
+                /*
+                 * Note : We must disable BH
+                 */
+                return false;
+
+        __lock_sock(sk);
+        sk->sk_lock.owned = 1;
+        spin_unlock(&sk->sk_lock.slock);
+        /*
+         * The sk_lock has mutex_lock() semantics here:
+         */
+        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+        local_bh_enable();
+        return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
         struct timeval tv;
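
lock_sock_fast() reports which path it took, and the matching unlock must be told: false means the socket spinlock is still held with BHs disabled, true means the lock is owned exactly as after lock_sock(). The counterpart unlock_sock_fast() lives in include/net/sock.h and is therefore outside this net/-only diffstat. A minimal usage sketch, assuming a short non-sleeping critical section like the datagram/UDP call sites converted in this diff (example_reclaim() is an illustrative name):

        /* Sketch only: the work between lock and unlock must not sleep,
         * because on the fast path we are under spin_lock_bh(). */
        static void example_reclaim(struct sock *sk)
        {
                bool slow = lock_sock_fast(sk); /* false: slock held, BH off
                                                 * true:  owned like lock_sock() */

                sk_mem_reclaim_partial(sk);     /* short, non-blocking work */

                unlock_sock_fast(sk, slow);     /* spin_unlock_bh() or release_sock() */
        }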
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..856123fe32f9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
         struct rtattr *mp_head;
 
         /* If cache is unresolved, don't try to parse IIF and OIF */
-        if (c->mfc_parent > MAXVIFS)
+        if (c->mfc_parent >= MAXVIFS)
                 return -ENOENT;
 
         if (VIF_EXISTS(mrt, c->mfc_parent))
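
The one-character fix above matters because mfc_parent is used to index a table of exactly MAXVIFS entries (valid indices 0 .. MAXVIFS-1): the comment explains the check as skipping unresolved cache entries, but the boundary value MAXVIFS itself must also be rejected, and '>' lets it through to read one past the array. A standalone illustration of the bounds rule (the structures below are illustrative, not the ipmr ones):

        #define EXAMPLE_MAXVIFS 32

        struct example_vif { int dev_index; };
        static struct example_vif vif_table[EXAMPLE_MAXVIFS];  /* indices 0 .. 31 */

        static struct example_vif *lookup_parent(unsigned short parent)
        {
                if (parent >= EXAMPLE_MAXVIFS)  /* '>' alone would accept parent == 32 */
                        return NULL;            /* and read past the end of vif_table */
                return &vif_table[parent];
        }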
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index baeec29fe0f1..58585748bdac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
         spin_unlock_bh(&rcvq->lock);
 
         if (!skb_queue_empty(&list_kill)) {
-                lock_sock_bh(sk);
+                bool slow = lock_sock_fast(sk);
+
                 __skb_queue_purge(&list_kill);
                 sk_mem_reclaim_partial(sk);
-                unlock_sock_bh(sk);
+                unlock_sock_fast(sk, slow);
         }
         return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         int peeked;
         int err;
         int is_udplite = IS_UDPLITE(sk);
+        bool slow;
 
         /*
          *      Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
         return err;
 
 csum_copy_err:
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         if (!skb_kill_datagram(sk, skb, flags))
                 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         if (noblock)
                 return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-        lock_sock_bh(sk);
+        bool slow = lock_sock_fast(sk);
         udp_flush_pending_frames(sk);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 }
 
 /*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
         if (mtu < IPV6_MIN_MTU)
                 mtu = IPV6_MIN_MTU;
 
-        if (skb->len > mtu) {
+        if (skb->len > mtu && !skb_is_gso(skb)) {
                 /* Again, force OUTPUT device used as source address */
                 skb->dev = dst->dev;
                 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..073071f2b75b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
         struct rtattr *mp_head;
 
         /* If cache is unresolved, don't try to parse IIF and OIF */
-        if (c->mf6c_parent > MAXMIFS)
+        if (c->mf6c_parent >= MAXMIFS)
                 return -ENOENT;
 
         if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
         int err;
         int is_udplite = IS_UDPLITE(sk);
         int is_udp4;
+        bool slow;
 
         if (addr_len)
                 *addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
         return err;
 
 csum_copy_err:
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         if (!skb_kill_datagram(sk, skb, flags)) {
                 if (is_udp4)
                         UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
                         UDP6_INC_STATS_USER(sock_net(sk),
                                         UDP_MIB_INERRORS, is_udplite);
         }
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         if (flags & MSG_DONTWAIT)
                 return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
         save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
         if (!save_msg)
-                return;
+                goto out_unlock;
         save_msg->path = path;
         save_msg->msg = *msg;
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
         if (ip_route_output_key(net, &rt, &fl) != 0)
                 return false;
 
-        dst_release(skb_dst(skb));
+        skb_dst_drop(skb);
         skb_dst_set(skb, &rt->u.dst);
         skb->dev      = rt->u.dst.dev;
         skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
         if (dst == NULL)
                 return false;
 
-        dst_release(skb_dst(skb));
+        skb_dst_drop(skb);
         skb_dst_set(skb, dst);
         skb->dev      = dst->dev;
         skb->protocol = htons(ETH_P_IPV6);
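
Replacing dst_release(skb_dst(skb)) with skb_dst_drop() in both routing helpers above is about ownership: skb_dst_drop() releases the old entry only if the skb actually holds a reference (not when it merely borrowed a noref dst) and always clears the stored pointer, so the skb_dst_set() that follows cannot double-release or leak. A minimal sketch of the pattern, assuming the caller already owns a reference on new_dst from a route lookup (retarget_skb() is an illustrative name):

        /* Sketch: safely swap the route attached to an skb. */
        static void retarget_skb(struct sk_buff *skb, struct dst_entry *new_dst)
        {
                skb_dst_drop(skb);              /* drop the old dst, refcounted or noref */
                skb_dst_set(skb, new_dst);      /* skb takes over the caller's reference */
                skb->dev = new_dst->dev;        /* steer output to the new route's device */
        }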