author     Jon Paul Maloy <jon.maloy@ericsson.com>    2015-02-05 08:36:40 -0500
committer  David S. Miller <davem@davemloft.net>      2015-02-05 19:00:02 -0500
commit     94153e36e709e78fc4e1f93dc4e4da785690c7d1
tree       153c7d99adc7566b0d4c67b0cdade977b41b6573   /net/tipc/socket.c
parent     e3a77561e7d326e18881ef3cb84807892b353459
tipc: use existing sk_write_queue for outgoing packet chain
The list for outgoing traffic buffers from a socket is currently allocated
on the stack. This forces us to initialize the queue for each sent message,
something costing extra CPU cycles in the most critical data path. Later in
this series we will introduce a new safe input buffer queue, something that
would force us to initialize even the spinlock of the outgoing queue.

A closer analysis reveals that the queue always is filled and emptied within
the same lock_sock() session. It is therefore safe to use a queue aggregated
in the socket itself for this purpose. Since there already exists a queue
for this in struct sock, sk_write_queue, we introduce use of that queue in
this commit.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
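To make the reasoning above concrete, here is a minimal userspace sketch of the before/after pattern. It is an analogy only: struct buf, struct buf_list, struct mock_sock, list_init(), list_add(), list_purge() and the pthread mutex are invented stand-ins for sk_buff, sk_buff_head, struct sock, the skb queue helpers and lock_sock(); none of it is TIPC or kernel API.

/*
 * Minimal userspace analogy of the change, NOT kernel code: all names
 * below are invented stand-ins for the kernel structures named above.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {                            /* stands in for struct sk_buff */
	struct buf *next;
	int len;
};

struct buf_list {                       /* stands in for struct sk_buff_head */
	struct buf *head, *tail;
};

struct mock_sock {                      /* stands in for struct sock */
	pthread_mutex_t lock;           /* lock_sock() analog */
	struct buf_list write_queue;    /* sk_write_queue analog */
};

static void list_init(struct buf_list *l)
{
	l->head = l->tail = NULL;
}

static void list_add(struct buf_list *l, struct buf *b)
{
	b->next = NULL;
	if (l->tail)
		l->tail->next = b;
	else
		l->head = b;
	l->tail = b;
}

static void list_purge(struct buf_list *l)
{
	while (l->head) {
		struct buf *next = l->head->next;
		free(l->head);
		l->head = next;
	}
	l->tail = NULL;
}

/* Old pattern: a fresh queue on the stack, re-initialized per message. */
static void send_old_style(struct mock_sock *sk, int len)
{
	struct buf_list chain;
	struct buf *b = malloc(sizeof(*b));

	(void)sk;
	if (!b)
		return;
	list_init(&chain);              /* per-message init cost */
	b->len = len;
	list_add(&chain, b);
	printf("old: queued %d bytes\n", chain.head->len);
	list_purge(&chain);             /* drained before returning */
}

/*
 * New pattern: reuse the queue embedded in the socket.  Safe because the
 * queue is filled and drained inside the same lock section, so it is
 * always empty between calls and never needs re-initialization.
 */
static void send_new_style(struct mock_sock *sk, int len)
{
	struct buf_list *chain = &sk->write_queue;
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return;
	pthread_mutex_lock(&sk->lock);
	b->len = len;
	list_add(chain, b);
	printf("new: queued %d bytes\n", chain->head->len);
	list_purge(chain);              /* left empty before unlock */
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct mock_sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

	list_init(&sk.write_queue);     /* one-time init at "socket" creation */
	send_old_style(&sk, 100);
	send_new_style(&sk, 200);
	return 0;
}

The reuse is only safe because, as the commit message argues, the queue is always drained before the lock section ends. In the actual patch all three send paths (tipc_sendmcast(), tipc_sendmsg() and tipc_send_stream()) drop the on-stack sk_buff_head and its per-call __skb_queue_head_init(), and instead point a pktchain pointer at &sk->sk_write_queue, as the hunks below show.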
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--   net/tipc/socket.c   31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e14b2aedb212..611a04fb0ddc 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -69,8 +69,6 @@
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
  * @probing_intv:
- * @port: port - interacts with 'sk' and with the rest of the TIPC stack
- * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  * @link_cong: non-zero if owner must sleep because of link congestion
@@ -737,7 +735,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct net *net = sock_net(sk);
 	struct tipc_msg *mhdr = &tsk->phdr;
-	struct sk_buff_head head;
+	struct sk_buff_head *pktchain = &sk->sk_write_queue;
 	struct iov_iter save = msg->msg_iter;
 	uint mtu;
 	int rc;
@@ -753,13 +751,12 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
 
 new_mtu:
 	mtu = tipc_bclink_get_mtu();
-	__skb_queue_head_init(&head);
-	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
+	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
 	if (unlikely(rc < 0))
 		return rc;
 
 	do {
-		rc = tipc_bclink_xmit(net, &head);
+		rc = tipc_bclink_xmit(net, pktchain);
 		if (likely(rc >= 0)) {
 			rc = dsz;
 			break;
@@ -773,7 +770,7 @@ new_mtu:
 		tipc_sk(sk)->link_cong = 1;
 		rc = tipc_wait_for_sndmsg(sock, &timeo);
 		if (rc)
-			__skb_queue_purge(&head);
+			__skb_queue_purge(pktchain);
 	} while (!rc);
 	return rc;
 }
@@ -897,7 +894,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct net *net = sock_net(sk);
 	struct tipc_msg *mhdr = &tsk->phdr;
 	u32 dnode, dport;
-	struct sk_buff_head head;
+	struct sk_buff_head *pktchain = &sk->sk_write_queue;
 	struct sk_buff *skb;
 	struct tipc_name_seq *seq = &dest->addr.nameseq;
 	struct iov_iter save;
@@ -972,15 +969,14 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
 	save = m->msg_iter;
 new_mtu:
 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
-	__skb_queue_head_init(&head);
-	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
+	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
 	if (rc < 0)
 		goto exit;
 
 	do {
-		skb = skb_peek(&head);
+		skb = skb_peek(pktchain);
 		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-		rc = tipc_link_xmit(net, &head, dnode, tsk->portid);
+		rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
 		if (likely(rc >= 0)) {
 			if (sock->state != SS_READY)
 				sock->state = SS_CONNECTING;
@@ -996,7 +992,7 @@ new_mtu:
 		tsk->link_cong = 1;
 		rc = tipc_wait_for_sndmsg(sock, &timeo);
 		if (rc)
-			__skb_queue_purge(&head);
+			__skb_queue_purge(pktchain);
 	} while (!rc);
 exit:
 	if (iocb)
@@ -1054,7 +1050,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct tipc_msg *mhdr = &tsk->phdr;
-	struct sk_buff_head head;
+	struct sk_buff_head *pktchain = &sk->sk_write_queue;
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	u32 portid = tsk->portid;
 	int rc = -EINVAL;
@@ -1091,13 +1087,12 @@ next:
 	save = m->msg_iter;
 	mtu = tsk->max_pkt;
 	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-	__skb_queue_head_init(&head);
-	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
+	rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
 	if (unlikely(rc < 0))
 		goto exit;
 	do {
 		if (likely(!tsk_conn_cong(tsk))) {
-			rc = tipc_link_xmit(net, &head, dnode, portid);
+			rc = tipc_link_xmit(net, pktchain, dnode, portid);
 			if (likely(!rc)) {
 				tsk->sent_unacked++;
 				sent += send;
@@ -1117,7 +1112,7 @@ next:
 		}
 		rc = tipc_wait_for_sndpkt(sock, &timeo);
 		if (rc)
-			__skb_queue_purge(&head);
+			__skb_queue_purge(pktchain);
 	} while (!rc);
 exit:
 	if (iocb)