about · summary · refs · log · tree · commit · diff · stats
path: root/net/unix/af_unix.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--  net/unix/af_unix.c  |  80
1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c4ce243824bb..c1f403bed683 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
1246 return 0; 1246 return 0;
1247} 1247}
1248 1248
1249static void unix_sock_inherit_flags(const struct socket *old,
1250 struct socket *new)
1251{
1252 if (test_bit(SOCK_PASSCRED, &old->flags))
1253 set_bit(SOCK_PASSCRED, &new->flags);
1254 if (test_bit(SOCK_PASSSEC, &old->flags))
1255 set_bit(SOCK_PASSSEC, &new->flags);
1256}
1257
1249static int unix_accept(struct socket *sock, struct socket *newsock, int flags) 1258static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1250{ 1259{
1251 struct sock *sk = sock->sk; 1260 struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1280 /* attach accepted sock to socket */ 1289 /* attach accepted sock to socket */
1281 unix_state_lock(tsk); 1290 unix_state_lock(tsk);
1282 newsock->state = SS_CONNECTED; 1291 newsock->state = SS_CONNECTED;
1292 unix_sock_inherit_flags(sock, newsock);
1283 sock_graft(tsk, newsock); 1293 sock_graft(tsk, newsock);
1284 unix_state_unlock(tsk); 1294 unix_state_unlock(tsk);
1285 return 0; 1295 return 0;
@@ -1479,7 +1489,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1479 MAX_SKB_FRAGS * PAGE_SIZE); 1489 MAX_SKB_FRAGS * PAGE_SIZE);
1480 1490
1481 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, 1491 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1482 msg->msg_flags & MSG_DONTWAIT, &err); 1492 msg->msg_flags & MSG_DONTWAIT, &err,
1493 PAGE_ALLOC_COSTLY_ORDER);
1483 if (skb == NULL) 1494 if (skb == NULL)
1484 goto out; 1495 goto out;
1485 1496
@@ -1596,6 +1607,10 @@ out:
1596 return err; 1607 return err;
1597} 1608}
1598 1609
1610/* We use paged skbs for stream sockets, and limit occupancy to 32768
1611 * bytes, and a minimun of a full page.
1612 */
1613#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1599 1614
1600static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, 1615static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1601 struct msghdr *msg, size_t len) 1616 struct msghdr *msg, size_t len)
@@ -1609,6 +1624,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1609 struct scm_cookie tmp_scm; 1624 struct scm_cookie tmp_scm;
1610 bool fds_sent = false; 1625 bool fds_sent = false;
1611 int max_level; 1626 int max_level;
1627 int data_len;
1612 1628
1613 if (NULL == siocb->scm) 1629 if (NULL == siocb->scm)
1614 siocb->scm = &tmp_scm; 1630 siocb->scm = &tmp_scm;
@@ -1635,40 +1651,22 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1635 goto pipe_err; 1651 goto pipe_err;
1636 1652
1637 while (sent < len) { 1653 while (sent < len) {
1638 /* 1654 size = len - sent;
1639 * Optimisation for the fact that under 0.01% of X
1640 * messages typically need breaking up.
1641 */
1642
1643 size = len-sent;
1644 1655
1645 /* Keep two messages in the pipe so it schedules better */ 1656 /* Keep two messages in the pipe so it schedules better */
1646 if (size > ((sk->sk_sndbuf >> 1) - 64)) 1657 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1647 size = (sk->sk_sndbuf >> 1) - 64;
1648 1658
1649 if (size > SKB_MAX_ALLOC) 1659 /* allow fallback to order-0 allocations */
1650 size = SKB_MAX_ALLOC; 1660 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1651 1661
1652 /* 1662 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1653 * Grab a buffer
1654 */
1655 1663
1656 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, 1664 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1657 &err); 1665 msg->msg_flags & MSG_DONTWAIT, &err,
1658 1666 get_order(UNIX_SKB_FRAGS_SZ));
1659 if (skb == NULL) 1667 if (!skb)
1660 goto out_err; 1668 goto out_err;
1661 1669
1662 /*
1663 * If you pass two values to the sock_alloc_send_skb
1664 * it tries to grab the large buffer with GFP_NOFS
1665 * (which can fail easily), and if it fails grab the
1666 * fallback size buffer which is under a page and will
1667 * succeed. [Alan]
1668 */
1669 size = min_t(int, size, skb_tailroom(skb));
1670
1671
1672 /* Only send the fds in the first buffer */ 1670 /* Only send the fds in the first buffer */
1673 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); 1671 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1674 if (err < 0) { 1672 if (err < 0) {
@@ -1678,7 +1676,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1678 max_level = err + 1; 1676 max_level = err + 1;
1679 fds_sent = true; 1677 fds_sent = true;
1680 1678
1681 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 1679 skb_put(skb, size - data_len);
1680 skb->data_len = data_len;
1681 skb->len = size;
1682 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
1683 sent, size);
1682 if (err) { 1684 if (err) {
1683 kfree_skb(skb); 1685 kfree_skb(skb);
1684 goto out_err; 1686 goto out_err;
@@ -1890,6 +1892,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
1890 return timeo; 1892 return timeo;
1891} 1893}
1892 1894
1895static unsigned int unix_skb_len(const struct sk_buff *skb)
1896{
1897 return skb->len - UNIXCB(skb).consumed;
1898}
1899
1893static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, 1900static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1894 struct msghdr *msg, size_t size, 1901 struct msghdr *msg, size_t size,
1895 int flags) 1902 int flags)
@@ -1977,8 +1984,8 @@ again:
1977 } 1984 }
1978 1985
1979 skip = sk_peek_offset(sk, flags); 1986 skip = sk_peek_offset(sk, flags);
1980 while (skip >= skb->len) { 1987 while (skip >= unix_skb_len(skb)) {
1981 skip -= skb->len; 1988 skip -= unix_skb_len(skb);
1982 last = skb; 1989 last = skb;
1983 skb = skb_peek_next(skb, &sk->sk_receive_queue); 1990 skb = skb_peek_next(skb, &sk->sk_receive_queue);
1984 if (!skb) 1991 if (!skb)
@@ -2005,8 +2012,9 @@ again:
2005 sunaddr = NULL; 2012 sunaddr = NULL;
2006 } 2013 }
2007 2014
2008 chunk = min_t(unsigned int, skb->len - skip, size); 2015 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2009 if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) { 2016 if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
2017 msg->msg_iov, chunk)) {
2010 if (copied == 0) 2018 if (copied == 0)
2011 copied = -EFAULT; 2019 copied = -EFAULT;
2012 break; 2020 break;
@@ -2016,14 +2024,14 @@ again:
2016 2024
2017 /* Mark read part of skb as used */ 2025 /* Mark read part of skb as used */
2018 if (!(flags & MSG_PEEK)) { 2026 if (!(flags & MSG_PEEK)) {
2019 skb_pull(skb, chunk); 2027 UNIXCB(skb).consumed += chunk;
2020 2028
2021 sk_peek_offset_bwd(sk, chunk); 2029 sk_peek_offset_bwd(sk, chunk);
2022 2030
2023 if (UNIXCB(skb).fp) 2031 if (UNIXCB(skb).fp)
2024 unix_detach_fds(siocb->scm, skb); 2032 unix_detach_fds(siocb->scm, skb);
2025 2033
2026 if (skb->len) 2034 if (unix_skb_len(skb))
2027 break; 2035 break;
2028 2036
2029 skb_unlink(skb, &sk->sk_receive_queue); 2037 skb_unlink(skb, &sk->sk_receive_queue);
@@ -2107,7 +2115,7 @@ long unix_inq_len(struct sock *sk)
2107 if (sk->sk_type == SOCK_STREAM || 2115 if (sk->sk_type == SOCK_STREAM ||
2108 sk->sk_type == SOCK_SEQPACKET) { 2116 sk->sk_type == SOCK_SEQPACKET) {
2109 skb_queue_walk(&sk->sk_receive_queue, skb) 2117 skb_queue_walk(&sk->sk_receive_queue, skb)
2110 amount += skb->len; 2118 amount += unix_skb_len(skb);
2111 } else { 2119 } else {
2112 skb = skb_peek(&sk->sk_receive_queue); 2120 skb = skb_peek(&sk->sk_receive_queue);
2113 if (skb) 2121 if (skb)