aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/socket.c
diff options
context:
space:
mode:
author	Jon Maloy <jon.maloy@ericsson.com>	2017-10-13 05:04:20 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-13 11:46:00 -0400
commit	64ac5f5977df5b276374fb2f051082129f5cdb22 (patch)
tree	4dd4044eb9df39cd26904a6c328b41d989b5414d /net/tipc/socket.c
parent	38077b8ef831daba55913f7e24732b062d0bdebb (diff)
tipc: refactor function filter_rcv()
In the following commits we will need to handle multiple incoming and rejected/returned buffers in the function socket.c::filter_rcv(). As a preparation for this, we generalize the function by handling buffer queues instead of individual buffers. We also introduce a help function tipc_skb_reject(), and rename filter_rcv() to tipc_sk_filter_rcv() in line with other functions in socket.c. Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Acked-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--	net/tipc/socket.c	161
1 files changed, 80 insertions, 81 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 7659e792ecdb..bc226f5a1be3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -111,7 +111,7 @@ struct tipc_sock {
111 struct rcu_head rcu; 111 struct rcu_head rcu;
112}; 112};
113 113
114static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 114static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
115static void tipc_data_ready(struct sock *sk); 115static void tipc_data_ready(struct sock *sk);
116static void tipc_write_space(struct sock *sk); 116static void tipc_write_space(struct sock *sk);
117static void tipc_sock_destruct(struct sock *sk); 117static void tipc_sock_destruct(struct sock *sk);
@@ -453,7 +453,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
453 msg_set_origport(msg, tsk->portid); 453 msg_set_origport(msg, tsk->portid);
454 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); 454 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
455 sk->sk_shutdown = 0; 455 sk->sk_shutdown = 0;
456 sk->sk_backlog_rcv = tipc_backlog_rcv; 456 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
457 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 457 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
458 sk->sk_data_ready = tipc_data_ready; 458 sk->sk_data_ready = tipc_data_ready;
459 sk->sk_write_space = tipc_write_space; 459 sk->sk_write_space = tipc_write_space;
@@ -850,16 +850,16 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
850} 850}
851 851
852/** 852/**
853 * tipc_sk_proto_rcv - receive a connection mng protocol message 853 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
854 * @tsk: receiving socket 854 * @tsk: receiving socket
855 * @skb: pointer to message buffer. 855 * @skb: pointer to message buffer.
856 */ 856 */
857static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 857static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
858 struct sk_buff_head *xmitq) 858 struct sk_buff_head *xmitq)
859{ 859{
860 struct sock *sk = &tsk->sk;
861 u32 onode = tsk_own_node(tsk);
862 struct tipc_msg *hdr = buf_msg(skb); 860 struct tipc_msg *hdr = buf_msg(skb);
861 u32 onode = tsk_own_node(tsk);
862 struct sock *sk = &tsk->sk;
863 int mtyp = msg_type(hdr); 863 int mtyp = msg_type(hdr);
864 bool conn_cong; 864 bool conn_cong;
865 865
@@ -1536,14 +1536,41 @@ static void tipc_sock_destruct(struct sock *sk)
1536 __skb_queue_purge(&sk->sk_receive_queue); 1536 __skb_queue_purge(&sk->sk_receive_queue);
1537} 1537}
1538 1538
1539static void tipc_sk_proto_rcv(struct sock *sk,
1540 struct sk_buff_head *inputq,
1541 struct sk_buff_head *xmitq)
1542{
1543 struct sk_buff *skb = __skb_dequeue(inputq);
1544 struct tipc_sock *tsk = tipc_sk(sk);
1545 struct tipc_msg *hdr = buf_msg(skb);
1546
1547 switch (msg_user(hdr)) {
1548 case CONN_MANAGER:
1549 tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
1550 return;
1551 case SOCK_WAKEUP:
1552 u32_del(&tsk->cong_links, msg_orignode(hdr));
1553 tsk->cong_link_cnt--;
1554 sk->sk_write_space(sk);
1555 break;
1556 case TOP_SRV:
1557 tipc_sk_top_evt(tsk, (void *)msg_data(hdr));
1558 break;
1559 default:
1560 break;
1561 }
1562
1563 kfree_skb(skb);
1564}
1565
1539/** 1566/**
1540 * filter_connect - Handle all incoming messages for a connection-based socket 1567 * tipc_filter_connect - Handle incoming message for a connection-based socket
1541 * @tsk: TIPC socket 1568 * @tsk: TIPC socket
1542 * @skb: pointer to message buffer. Set to NULL if buffer is consumed 1569 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1543 * 1570 *
1544 * Returns true if everything ok, false otherwise 1571 * Returns true if everything ok, false otherwise
1545 */ 1572 */
1546static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1573static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1547{ 1574{
1548 struct sock *sk = &tsk->sk; 1575 struct sock *sk = &tsk->sk;
1549 struct net *net = sock_net(sk); 1576 struct net *net = sock_net(sk);
@@ -1657,7 +1684,7 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1657} 1684}
1658 1685
1659/** 1686/**
1660 * filter_rcv - validate incoming message 1687 * tipc_sk_filter_rcv - validate incoming message
1661 * @sk: socket 1688 * @sk: socket
1662 * @skb: pointer to message. 1689 * @skb: pointer to message.
1663 * 1690 *
@@ -1666,75 +1693,49 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1666 * 1693 *
1667 * Called with socket lock already taken 1694 * Called with socket lock already taken
1668 * 1695 *
1669 * Returns true if message was added to socket receive queue, otherwise false
1670 */ 1696 */
1671static bool filter_rcv(struct sock *sk, struct sk_buff *skb, 1697static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
1672 struct sk_buff_head *xmitq) 1698 struct sk_buff_head *xmitq)
1673{ 1699{
1700 bool sk_conn = !tipc_sk_type_connectionless(sk);
1674 struct tipc_sock *tsk = tipc_sk(sk); 1701 struct tipc_sock *tsk = tipc_sk(sk);
1675 struct tipc_msg *hdr = buf_msg(skb); 1702 struct tipc_msg *hdr = buf_msg(skb);
1676 unsigned int limit = rcvbuf_limit(sk, skb); 1703 struct net *net = sock_net(sk);
1677 int err = TIPC_OK; 1704 struct sk_buff_head inputq;
1705 int limit, err = TIPC_OK;
1678 1706
1679 if (unlikely(!msg_isdata(hdr))) { 1707 TIPC_SKB_CB(skb)->bytes_read = 0;
1680 switch (msg_user(hdr)) { 1708 __skb_queue_head_init(&inputq);
1681 case CONN_MANAGER: 1709 __skb_queue_tail(&inputq, skb);
1682 tipc_sk_proto_rcv(tsk, skb, xmitq);
1683 return false;
1684 case SOCK_WAKEUP:
1685 u32_del(&tsk->cong_links, msg_orignode(hdr));
1686 tsk->cong_link_cnt--;
1687 sk->sk_write_space(sk);
1688 break;
1689 case TOP_SRV:
1690 tipc_sk_top_evt(tsk, (void *)msg_data(hdr));
1691 break;
1692 default:
1693 break;
1694 }
1695 kfree_skb(skb);
1696 return false;
1697 }
1698 1710
1699 /* Drop if illegal message type */ 1711 if (unlikely(!msg_isdata(hdr)))
1700 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) { 1712 tipc_sk_proto_rcv(sk, &inputq, xmitq);
1701 kfree_skb(skb); 1713 else if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG))
1702 return false; 1714 return kfree_skb(skb);
1703 }
1704 1715
1705 /* Reject if wrong message type for current socket state */ 1716 /* Validate and add to receive buffer if there is space */
1706 if (tipc_sk_type_connectionless(sk)) { 1717 while ((skb = __skb_dequeue(&inputq))) {
1707 if (msg_connected(hdr)) { 1718 hdr = buf_msg(skb);
1719 limit = rcvbuf_limit(sk, skb);
1720 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
1721 (!sk_conn && msg_connected(hdr)))
1708 err = TIPC_ERR_NO_PORT; 1722 err = TIPC_ERR_NO_PORT;
1709 goto reject; 1723 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
1710 } 1724 err = TIPC_ERR_OVERLOAD;
1711 } else if (unlikely(!filter_connect(tsk, skb))) {
1712 err = TIPC_ERR_NO_PORT;
1713 goto reject;
1714 }
1715 1725
1716 /* Reject message if there isn't room to queue it */ 1726 if (unlikely(err)) {
1717 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) { 1727 tipc_skb_reject(net, err, skb, xmitq);
1718 err = TIPC_ERR_OVERLOAD; 1728 err = TIPC_OK;
1719 goto reject; 1729 continue;
1730 }
1731 __skb_queue_tail(&sk->sk_receive_queue, skb);
1732 skb_set_owner_r(skb, sk);
1733 sk->sk_data_ready(sk);
1720 } 1734 }
1721
1722 /* Enqueue message */
1723 TIPC_SKB_CB(skb)->bytes_read = 0;
1724 __skb_queue_tail(&sk->sk_receive_queue, skb);
1725 skb_set_owner_r(skb, sk);
1726
1727 sk->sk_data_ready(sk);
1728 return true;
1729
1730reject:
1731 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1732 __skb_queue_tail(xmitq, skb);
1733 return false;
1734} 1735}
1735 1736
1736/** 1737/**
1737 * tipc_backlog_rcv - handle incoming message from backlog queue 1738 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
1738 * @sk: socket 1739 * @sk: socket
1739 * @skb: message 1740 * @skb: message
1740 * 1741 *
@@ -1742,27 +1743,25 @@ reject:
1742 * 1743 *
1743 * Returns 0 1744 * Returns 0
1744 */ 1745 */
1745static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) 1746static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1746{ 1747{
1747 unsigned int truesize = skb->truesize; 1748 unsigned int before = sk_rmem_alloc_get(sk);
1748 struct sk_buff_head xmitq; 1749 struct sk_buff_head xmitq;
1749 u32 dnode, selector; 1750 u32 dnode, selector;
1751 unsigned int added;
1750 1752
1751 __skb_queue_head_init(&xmitq); 1753 __skb_queue_head_init(&xmitq);
1752 1754
1753 if (likely(filter_rcv(sk, skb, &xmitq))) { 1755 tipc_sk_filter_rcv(sk, skb, &xmitq);
1754 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); 1756 added = sk_rmem_alloc_get(sk) - before;
1755 return 0; 1757 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
1756 }
1757
1758 if (skb_queue_empty(&xmitq))
1759 return 0;
1760 1758
1761 /* Send response/rejected message */ 1759 /* Send pending response/rejected messages, if any */
1762 skb = __skb_dequeue(&xmitq); 1760 while ((skb = __skb_dequeue(&xmitq))) {
1763 dnode = msg_destnode(buf_msg(skb)); 1761 selector = msg_origport(buf_msg(skb));
1764 selector = msg_origport(buf_msg(skb)); 1762 dnode = msg_destnode(buf_msg(skb));
1765 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 1763 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1764 }
1766 return 0; 1765 return 0;
1767} 1766}
1768 1767
@@ -1794,7 +1793,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1794 1793
1795 /* Add message directly to receive queue if possible */ 1794 /* Add message directly to receive queue if possible */
1796 if (!sock_owned_by_user(sk)) { 1795 if (!sock_owned_by_user(sk)) {
1797 filter_rcv(sk, skb, xmitq); 1796 tipc_sk_filter_rcv(sk, skb, xmitq);
1798 continue; 1797 continue;
1799 } 1798 }
1800 1799