path: root/net/tipc/socket.c
author     Jon Paul Maloy <jon.maloy@ericsson.com>  2015-02-05 08:36:38 -0500
committer  David S. Miller <davem@davemloft.net>    2015-02-05 19:00:02 -0500
commit     d570d86497eeb11410b1c096d82ade11bcdd966c (patch)
tree       8062856ffb899f95b7ec2f4a42aad004d8fc05c2 /net/tipc/socket.c
parent     1186adf7df04e3b4298943fe89d9741ab42e30ff (diff)
tipc: enqueue arrived buffers in socket in separate function
The code for enqueuing arriving buffers in the function tipc_sk_rcv() contains long code lines and currently goes to two indentation levels. As a cosmetic preparation for the next commits, we break it out into a separate function.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
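For readers less familiar with the pattern being factored out, the following is a minimal user-space sketch (not the kernel code itself) of the same dispatch idea: deliver a buffer immediately when the socket is not owned by a user context, otherwise park it on a bounded backlog queue and report overload when the limit is hit. All names and types here (mock_sock, enqueue_buf, BACKLOG_MAX, and so on) are illustrative stand-ins, not part of TIPC or the kernel API.

/*
 * Illustrative sketch of the "process now or backlog" dispatch that
 * tipc_sk_enqueue_skb() factors out of tipc_sk_rcv(). Mock types only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_OK        0
#define MOCK_OVERLOAD -1
#define BACKLOG_MAX    4

struct mock_sock {
	bool owned_by_user;               /* models sock_owned_by_user() */
	int backlog_len;                  /* models sk->sk_backlog.len   */
	const char *backlog[BACKLOG_MAX];
};

static int deliver_now(struct mock_sock *sk, const char **buf)
{
	(void)sk;
	printf("delivered: %s\n", *buf);  /* stands in for filter_rcv()  */
	*buf = NULL;                      /* buffer consumed             */
	return MOCK_OK;
}

/* Mirrors the shape of the new helper: consumes *buf on success,
 * leaves it in place and returns an error code on overload. */
static int enqueue_buf(struct mock_sock *sk, const char **buf)
{
	if (!*buf)
		return MOCK_OK;
	if (!sk->owned_by_user)
		return deliver_now(sk, buf);
	if (sk->backlog_len >= BACKLOG_MAX)
		return MOCK_OVERLOAD;     /* caller decides what to do   */
	sk->backlog[sk->backlog_len++] = *buf;
	*buf = NULL;
	return MOCK_OK;
}

int main(void)
{
	struct mock_sock sk = { .owned_by_user = true };
	const char *msg = "hello";

	if (enqueue_buf(&sk, &msg) == MOCK_OK && !msg)
		printf("backlogged, len=%d\n", sk.backlog_len);
	return 0;
}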
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--  net/tipc/socket.c  46
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f9cd587e4090..1d98bfcda6f6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1765,6 +1765,35 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 /**
+ * tipc_sk_enqueue_skb - enqueue buffer to socket or backlog queue
+ * @sk: socket
+ * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @dnode: if buffer should be forwarded/returned, send to this node
+ *
+ * Caller must hold socket lock
+ *
+ * Returns TIPC_OK (0) or -tipc error code
+ */
+static int tipc_sk_enqueue_skb(struct sock *sk, struct sk_buff **skb)
+{
+	unsigned int lim;
+	atomic_t *dcnt;
+
+	if (unlikely(!*skb))
+		return TIPC_OK;
+	if (!sock_owned_by_user(sk))
+		return filter_rcv(sk, skb);
+	dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+	if (sk->sk_backlog.len)
+		atomic_set(dcnt, 0);
+	lim = rcvbuf_limit(sk, *skb) + atomic_read(dcnt);
+	if (unlikely(sk_add_backlog(sk, *skb, lim)))
+		return -TIPC_ERR_OVERLOAD;
+	*skb = NULL;
+	return TIPC_OK;
+}
+
+/**
  * tipc_sk_rcv - handle incoming message
  * @skb: buffer containing arriving message
  * Consumes buffer
@@ -1776,8 +1805,7 @@ int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
 	struct tipc_net *tn;
 	struct sock *sk;
 	u32 dport = msg_destport(buf_msg(skb));
-	int err = TIPC_OK;
-	uint limit;
+	int err;
 	u32 dnode;
 
 	/* Validate destination and message */
@@ -1788,20 +1816,8 @@ int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
 	}
 	sk = &tsk->sk;
 
-	/* Queue message */
 	spin_lock_bh(&sk->sk_lock.slock);
-
-	if (!sock_owned_by_user(sk)) {
-		err = filter_rcv(sk, &skb);
-	} else {
-		if (sk->sk_backlog.len == 0)
-			atomic_set(&tsk->dupl_rcvcnt, 0);
-		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
-		if (likely(!sk_add_backlog(sk, skb, limit)))
-			skb = NULL;
-		else
-			err = -TIPC_ERR_OVERLOAD;
-	}
+	err = tipc_sk_enqueue_skb(sk, &skb);
 	spin_unlock_bh(&sk->sk_lock.slock);
 	sock_put(sk);
 exit: