Diffstat (limited to 'net/core/sock.c')
 net/core/sock.c | 49 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ba569bc973..bbb25be7ddfe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -631,7 +631,7 @@ set_rcvbuf:
 
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
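
Note what the one-character fix changes for userspace: with the old
"ret = EINVAL;" the syscall returned a positive 22, so a caller testing
for the conventional -1 would treat the rejected flags as success, and
errno would never be set. A minimal probe of the corrected path
(hypothetical test program, not part of this patch):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_TIMESTAMPING
	#define SO_TIMESTAMPING 37	/* assumed asm-generic value */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int val = ~0;	/* sets bits outside SOF_TIMESTAMPING_MASK */

		/* With the fix this fails with -1 and errno == EINVAL. */
		if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			       &val, sizeof(val)) == -1)
			perror("setsockopt(SO_TIMESTAMPING)");
		return 0;
	}
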
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 			af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
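
The new memcpy() starts at sk_copy_start, which the BUILD_BUG_ON pins
directly behind sk_node and sk_refcnt, so a concurrent RCU lookup never
sees the clone's linkage or refcount torn mid-copy. The layout trick in
isolation (illustrative struct, assuming a marker field like this
patch's sk_copy_start):

	#include <stddef.h>
	#include <string.h>

	struct obj {
		struct obj	*next;		/* RCU hash linkage: never copied */
		int		refcnt;		/* RCU readers test this: ditto   */
		char		copy_start[0];	/* everything below is fair game  */
		int		a;
		long		b;
	};

	/* Copy all of src into dst except the linkage and the refcount. */
	static void obj_copy(struct obj *dst, const struct obj *src)
	{
		memcpy(dst->copy_start, src->copy_start,
		       sizeof(*src) - offsetof(struct obj, copy_start));
	}
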
@@ -939,8 +945,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	struct kmem_cache *slab;
 
 	slab = prot->slab;
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
+	if (slab != NULL) {
+		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+		if (!sk)
+			return sk;
+		if (priority & __GFP_ZERO) {
+			/*
+			 * caches using SLAB_DESTROY_BY_RCU should leave
+			 * sk_node.next unmodified. Special care is taken
+			 * when initializing object to zero.
+			 */
+			if (offsetof(struct sock, sk_node.next) != 0)
+				memset(sk, 0, offsetof(struct sock, sk_node.next));
+			memset(&sk->sk_node.pprev, 0,
+			       prot->obj_size - offsetof(struct sock,
+							 sk_node.pprev));
+		}
+	}
 	else
 		sk = kmalloc(prot->obj_size, priority);
 
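
The allocation side applies the same rule: __GFP_ZERO is stripped before
kmem_cache_alloc() because a SLAB_DESTROY_BY_RCU cache may hand the
object back while a lock-free reader is still traversing it through
sk_node.next, so the memset() calls zero everything around that pointer
by hand. Reduced to its essentials (hypothetical obj type standing in
for struct sock, with the preserved pointer at offset 0):

	#include <linux/slab.h>
	#include <linux/stddef.h>

	struct obj {
		struct obj	*next;	/* readers may still chase this: keep */
		int		a;	/* first field it is safe to zero     */
		long		b;
	};

	static struct obj *obj_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		struct obj *p = kmem_cache_alloc(cachep, flags & ~__GFP_ZERO);

		if (p && (flags & __GFP_ZERO))
			/* honour __GFP_ZERO manually, sparing ->next */
			memset((char *)p + offsetof(struct obj, a), 0,
			       sizeof(*p) - offsetof(struct obj, a));
		return p;
	}
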
@@ -1125,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 	newsk->sk_err	   = 0;
 	newsk->sk_priority = 0;
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&newsk->sk_refcnt, 2);
 
 	/*
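
The write barrier orders every field initialisation above it before the
atomic_set() that makes the clone visible with a nonzero refcount; the
lookup side completes the pairing by taking a reference only once the
count is provably live, then re-checking identity, as rculist_nulls.txt
prescribes. A reader-side sketch of that pairing (lockless_lookup() is
a hypothetical helper, not part of this patch):

	begin:
		rcu_read_lock();
		sk = lockless_lookup(head, hash);
		if (sk) {
			if (!atomic_inc_not_zero(&sk->sk_refcnt)) {
				/* zero refcnt: object is dead or mid-copy */
				rcu_read_unlock();
				goto begin;
			}
			if (sk->sk_hash != hash) {
				/* slab reused the object for another flow */
				sock_put(sk);
				rcu_read_unlock();
				goto begin;
			}
		}
		rcu_read_unlock();
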
@@ -1715,7 +1741,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
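
This and the three conversions below swap the open-coded sk_sleep test
for sk_has_sleeper(), presumably defined alongside this change in
include/net/sock.h: the same test fronted by a full barrier, so the
wakeup cannot read sk_sleep before a sleeper's addition to the wait
queue is visible. A sketch consistent with this diff (the barrier
placement is the point; it pairs with one on the poll side):

	static inline int sk_has_sleeper(struct sock *sk)
	{
		/*
		 * Order the wait-queue update in poll against this read;
		 * pairs with a matching barrier in sock_poll_wait().
		 */
		smp_mb();
		return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
	}
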
@@ -1723,7 +1749,7 @@ static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1758,7 @@ static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1773,7 @@ static void sock_def_write_space(struct sock *sk)
 	 * progress. --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 							POLLWRNORM | POLLWRBAND);
 
@@ -1840,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);