path: root/net/core/sock.c
Diffstat (limited to 'net/core/sock.c')
 net/core/sock.c | 54 ++++++++++++++++--------------------------------------
 1 file changed, 16 insertions(+), 38 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 5393b4b719d7..c0fc6bdad1e3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -925,8 +925,8 @@ set_rcvbuf:
 EXPORT_SYMBOL(sock_setsockopt);
 
 
-void cred_to_ucred(struct pid *pid, const struct cred *cred,
-                   struct ucred *ucred)
+static void cred_to_ucred(struct pid *pid, const struct cred *cred,
+                          struct ucred *ucred)
 {
         ucred->pid = pid_vnr(pid);
         ucred->uid = ucred->gid = -1;
@@ -937,7 +937,6 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
                 ucred->gid = from_kgid_munged(current_ns, cred->egid);
         }
 }
-EXPORT_SYMBOL_GPL(cred_to_ucred);
 
 int sock_getsockopt(struct socket *sock, int level, int optname,
                     char __user *optval, int __user *optlen)
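These two hunks make cred_to_ucred() static and drop its EXPORT_SYMBOL_GPL, which implies its remaining callers all live in this file (the SO_PEERCRED handler in sock_getsockopt() just below this hunk). Userspace is unaffected; the struct ucred it fills in is still reachable the usual way. A minimal userspace sketch, assuming a connected AF_UNIX socket fd; print_peer_creds() is a hypothetical helper name, not a libc or kernel API:

#define _GNU_SOURCE             /* struct ucred, SO_PEERCRED (glibc) */
#include <stdio.h>
#include <sys/socket.h>

/* Fetch the credentials that cred_to_ucred() populates kernel-side. */
static int print_peer_creds(int fd)     /* fd: connected AF_UNIX socket */
{
        struct ucred peer;
        socklen_t len = sizeof(peer);

        if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0)
                return -1;
        printf("pid=%d uid=%d gid=%d\n",
               (int)peer.pid, (int)peer.uid, (int)peer.gid);
        return 0;
}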
@@ -1168,6 +1167,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
                 break;
 
+        case SO_BPF_EXTENSIONS:
+                v.val = bpf_tell_extensions();
+                break;
+
         case SO_SELECT_ERR_QUEUE:
                 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
                 break;
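The new SO_BPF_EXTENSIONS option lets userspace ask which classic-BPF ancillary extensions (the SKF_AD_* offsets) the running kernel understands; kernel-side, bpf_tell_extensions() reports SKF_AD_MAX. A hedged userspace probe sketch; bpf_extensions_level() is a made-up name, and the fallback value 48 is the asm-generic socket constant (some architectures use different numbering):

#include <sys/socket.h>
#include <linux/filter.h>       /* SKF_AD_* offsets, for interpreting val */

#ifndef SO_BPF_EXTENSIONS
#define SO_BPF_EXTENSIONS 48    /* asm-generic value; absent in older headers */
#endif

/* Returns the kernel's SKF_AD_MAX, or -1 if the option is unknown
 * (kernels older than this change fail with ENOPROTOOPT). */
static int bpf_extensions_level(int fd)
{
        int val = 0;
        socklen_t len = sizeof(val);

        if (getsockopt(fd, SOL_SOCKET, SO_BPF_EXTENSIONS, &val, &len) < 0)
                return -1;
        return val;
}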
@@ -1308,19 +1311,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
         module_put(owner);
 }
 
-#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
-void sock_update_classid(struct sock *sk)
-{
-        u32 classid;
-
-        classid = task_cls_classid(current);
-        if (classid != sk->sk_classid)
-                sk->sk_classid = classid;
-}
-EXPORT_SYMBOL(sock_update_classid);
-#endif
-
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 void sock_update_netprioidx(struct sock *sk)
 {
         if (in_interrupt())
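Two things happen in this hunk: sock_update_classid() is dropped from sock.c (the classid update presumably moves out with the net_cls cgroup code), and the netprio guard switches to the renamed CONFIG_CGROUP_NET_PRIO option. The IS_ENABLED() idiom used in both guards covers built-in and modular configurations alike; a sketch with a placeholder CONFIG_FOO, not a real option:

/* Kernel-context sketch of the IS_ENABLED() guard pattern (see
 * include/linux/kconfig.h). IS_ENABLED(CONFIG_FOO) evaluates to 1
 * when the option is 'y' or 'm', so one guard handles both builds. */
struct sock;

#if IS_ENABLED(CONFIG_FOO)
void foo_update_sock(struct sock *sk);                  /* real version */
#else
static inline void foo_update_sock(struct sock *sk) { }        /* stub */
#endif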
@@ -1666,22 +1657,6 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 EXPORT_SYMBOL(sock_wmalloc);
 
 /*
- * Allocate a skb from the socket's receive buffer.
- */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
-                             gfp_t priority)
-{
-        if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
-                struct sk_buff *skb = alloc_skb(size, priority);
-                if (skb) {
-                        skb_set_owner_r(skb, sk);
-                        return skb;
-                }
-        }
-        return NULL;
-}
-
-/*
  * Allocate a memory block from the socket's option memory buffer.
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
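sock_rmalloc() is deleted outright, presumably because it no longer has in-tree users. The pattern it implemented is small enough to open-code where needed; a hedged kernel-context sketch, with rx_alloc_skb() as a hypothetical name (note the old 'force' override is dropped here):

/* Allocate an skb and charge it to the socket's receive buffer,
 * but only while the socket is under its receive-buffer limit. */
static struct sk_buff *rx_alloc_skb(struct sock *sk, unsigned long size,
                                    gfp_t priority)
{
        struct sk_buff *skb = NULL;

        if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                skb = alloc_skb(size, priority);
                if (skb)
                        skb_set_owner_r(skb, sk);       /* charge sk_rmem_alloc */
        }
        return skb;
}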
@@ -1800,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                 while (order) {
                         if (npages >= 1 << order) {
                                 page = alloc_pages(sk->sk_allocation |
-                                                   __GFP_COMP | __GFP_NOWARN,
+                                                   __GFP_COMP |
+                                                   __GFP_NOWARN |
+                                                   __GFP_NORETRY,
                                                    order);
                                 if (page)
                                         goto fill_page;
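Adding __GFP_NORETRY makes the high-order attempt fail fast instead of triggering heavy reclaim, compaction stalls, or the OOM killer; the enclosing while (order) loop already falls back toward smaller orders, so a failure here is now cheap. A condensed kernel-context sketch of that fallback shape; alloc_frag_block() is an illustrative name, not this function:

/* Try large physically-contiguous blocks cheaply, then degrade
 * gracefully to a single page. */
static struct page *alloc_frag_block(gfp_t base, int order)
{
        while (order > 0) {
                struct page *page = alloc_pages(base | __GFP_COMP |
                                                __GFP_NOWARN | __GFP_NORETRY,
                                                order);
                if (page)
                        return page;
                order--;        /* NORETRY made the failure cheap; step down */
        }
        return alloc_pages(base, 0);    /* order-0: no NORETRY needed */
}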
@@ -1865,14 +1842,12 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
                 put_page(pfrag->page);
         }
 
-        /* We restrict high order allocations to users that can afford to wait */
-        order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
-
+        order = SKB_FRAG_PAGE_ORDER;
         do {
                 gfp_t gfp = prio;
 
                 if (order)
-                        gfp |= __GFP_COMP | __GFP_NOWARN;
+                        gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
                 pfrag->page = alloc_pages(gfp, order);
                 if (likely(pfrag->page)) {
                         pfrag->offset = 0;
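Here the __GFP_WAIT gate disappears: previously only callers that could sleep tried high-order allocations, but with __GFP_NORETRY a failed high-order attempt is cheap for everyone, so all callers now start at SKB_FRAG_PAGE_ORDER. The hunk ends mid-loop; a hedged reconstruction of the full loop, filled in from the visible context rather than copied from the tree:

/* Walk the order down to 0 until a page of some size is obtained. */
bool refill_sketch(struct page_frag *pfrag, gfp_t prio)
{
        int order = SKB_FRAG_PAGE_ORDER;        /* same start for all callers now */

        do {
                gfp_t gfp = prio;

                if (order)
                        gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
                pfrag->page = alloc_pages(gfp, order);
                if (likely(pfrag->page)) {
                        pfrag->offset = 0;
                        pfrag->size = PAGE_SIZE << order;
                        return true;
                }
        } while (--order >= 0);

        return false;   /* even an order-0 page was unavailable */
}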
@@ -2382,10 +2357,13 @@ void release_sock(struct sock *sk)
         if (sk->sk_backlog.tail)
                 __release_sock(sk);
 
+        /* Warning : release_cb() might need to release sk ownership,
+         * ie call sock_release_ownership(sk) before us.
+         */
         if (sk->sk_prot->release_cb)
                 sk->sk_prot->release_cb(sk);
 
-        sk->sk_lock.owned = 0;
+        sock_release_ownership(sk);
         if (waitqueue_active(&sk->sk_lock.wq))
                 wake_up(&sk->sk_lock.wq);
         spin_unlock_bh(&sk->sk_lock.slock);
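The open-coded "sk->sk_lock.owned = 0;" becomes a named helper so that a protocol's release_cb() implementation can drop socket ownership itself before release_sock() reaches this point, which is exactly what the new comment warns about. The likely shape of the helper, presumably added to include/net/sock.h alongside this change (it must match the line it replaces):

/* Names the operation that release_cb() implementations may now
 * need to perform on their own. */
static inline void sock_release_ownership(struct sock *sk)
{
        sk->sk_lock.owned = 0;
}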