aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/socket.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-11 17:27:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-11 17:27:06 -0500
commit70e71ca0af244f48a5dcf56dc435243792e3a495 (patch)
treef7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /net/tipc/socket.c
parentbae41e45b7400496b9bf0c70c6004419d9987819 (diff)
parent00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) New offloading infrastructure and example 'rocker' driver for offloading of switching and routing to hardware. This work was done by a large group of dedicated individuals, not limited to: Scott Feldman, Jiri Pirko, Thomas Graf, John Fastabend, Jamal Hadi Salim, Andy Gospodarek, Florian Fainelli, Roopa Prabhu 2) Start making the networking operate on IOV iterators instead of modifying iov objects in-situ during transfers. Thanks to Al Viro and Herbert Xu. 3) A set of new netlink interfaces for the TIPC stack, from Richard Alpe. 4) Remove unnecessary looping during ipv6 routing lookups, from Martin KaFai Lau. 5) Add PAUSE frame generation support to gianfar driver, from Matei Pavaluca. 6) Allow for larger reordering levels in TCP, which are easily achievable in the real world right now, from Eric Dumazet. 7) Add a variant of napi_schedule that doesn't need to disable cpu interrupts, from Eric Dumazet. 8) Use a doubly linked list to optimize neigh_parms_release(), from Nicolas Dichtel. 9) Various enhancements to the kernel BPF verifier, and allow eBPF programs to actually be attached to sockets. From Alexei Starovoitov. 10) Support TSO/LSO in sunvnet driver, from David L Stevens. 11) Allow controlling ECN usage via routing metrics, from Florian Westphal. 12) Remote checksum offload, from Tom Herbert. 13) Add split-header receive, BQL, and xmit_more support to amd-xgbe driver, from Thomas Lendacky. 14) Add MPLS support to openvswitch, from Simon Horman. 15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen Klassert. 16) Do gro flushes on a per-device basis using a timer, from Eric Dumazet. This tries to resolve the conflicting goals between the desired handling of bulk vs. RPC-like traffic. 17) Allow userspace to ask for the CPU upon what a packet was received/steered, via SO_INCOMING_CPU. From Eric Dumazet. 18) Limit GSO packets to half the current congestion window, from Eric Dumazet. 
19) Add a generic helper so that all drivers set their RSS keys in a consistent way, from Eric Dumazet. 20) Add xmit_more support to enic driver, from Govindarajulu Varadarajan. 21) Add VLAN packet scheduler action, from Jiri Pirko. 22) Support configurable RSS hash functions via ethtool, from Eyal Perry. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits) Fix race condition between vxlan_sock_add and vxlan_sock_release net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header net/mlx4: Add support for A0 steering net/mlx4: Refactor QUERY_PORT net/mlx4_core: Add explicit error message when rule doesn't meet configuration net/mlx4: Add A0 hybrid steering net/mlx4: Add mlx4_bitmap zone allocator net/mlx4: Add a check if there are too many reserved QPs net/mlx4: Change QP allocation scheme net/mlx4_core: Use tasklet for user-space CQ completion events net/mlx4_core: Mask out host side virtualization features for guests net/mlx4_en: Set csum level for encapsulated packets be2net: Export tunnel offloads only when a VxLAN tunnel is created gianfar: Fix dma check map error when DMA_API_DEBUG is enabled cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call net: fec: only enable mdio interrupt before phy device link up net: fec: clear all interrupt events to support i.MX6SX net: fec: reset fep link status in suspend function net: sock: fix access via invalid file descriptor net: introduce helper macro for_each_cmsghdr ...
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--net/tipc/socket.c419
1 files changed, 312 insertions, 107 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 51bddc236a15..4731cad99d1c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -121,6 +121,14 @@ static const struct proto_ops msg_ops;
121static struct proto tipc_proto; 121static struct proto tipc_proto;
122static struct proto tipc_proto_kern; 122static struct proto tipc_proto_kern;
123 123
124static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
125 [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
126 [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
127 [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
128 [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
129 [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
130};
131
124/* 132/*
125 * Revised TIPC socket locking policy: 133 * Revised TIPC socket locking policy:
126 * 134 *
@@ -236,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)
236 */ 244 */
237static void tsk_rej_rx_queue(struct sock *sk) 245static void tsk_rej_rx_queue(struct sock *sk)
238{ 246{
239 struct sk_buff *buf; 247 struct sk_buff *skb;
240 u32 dnode; 248 u32 dnode;
241 249
242 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 250 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
243 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) 251 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
244 tipc_link_xmit(buf, dnode, 0); 252 tipc_link_xmit_skb(skb, dnode, 0);
245 } 253 }
246} 254}
247 255
@@ -454,7 +462,7 @@ static int tipc_release(struct socket *sock)
454{ 462{
455 struct sock *sk = sock->sk; 463 struct sock *sk = sock->sk;
456 struct tipc_sock *tsk; 464 struct tipc_sock *tsk;
457 struct sk_buff *buf; 465 struct sk_buff *skb;
458 u32 dnode; 466 u32 dnode;
459 467
460 /* 468 /*
@@ -473,11 +481,11 @@ static int tipc_release(struct socket *sock)
473 */ 481 */
474 dnode = tsk_peer_node(tsk); 482 dnode = tsk_peer_node(tsk);
475 while (sock->state != SS_DISCONNECTING) { 483 while (sock->state != SS_DISCONNECTING) {
476 buf = __skb_dequeue(&sk->sk_receive_queue); 484 skb = __skb_dequeue(&sk->sk_receive_queue);
477 if (buf == NULL) 485 if (skb == NULL)
478 break; 486 break;
479 if (TIPC_SKB_CB(buf)->handle != NULL) 487 if (TIPC_SKB_CB(skb)->handle != NULL)
480 kfree_skb(buf); 488 kfree_skb(skb);
481 else { 489 else {
482 if ((sock->state == SS_CONNECTING) || 490 if ((sock->state == SS_CONNECTING) ||
483 (sock->state == SS_CONNECTED)) { 491 (sock->state == SS_CONNECTED)) {
@@ -485,8 +493,8 @@ static int tipc_release(struct socket *sock)
485 tsk->connected = 0; 493 tsk->connected = 0;
486 tipc_node_remove_conn(dnode, tsk->ref); 494 tipc_node_remove_conn(dnode, tsk->ref);
487 } 495 }
488 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) 496 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
489 tipc_link_xmit(buf, dnode, 0); 497 tipc_link_xmit_skb(skb, dnode, 0);
490 } 498 }
491 } 499 }
492 500
@@ -494,12 +502,12 @@ static int tipc_release(struct socket *sock)
494 tipc_sk_ref_discard(tsk->ref); 502 tipc_sk_ref_discard(tsk->ref);
495 k_cancel_timer(&tsk->timer); 503 k_cancel_timer(&tsk->timer);
496 if (tsk->connected) { 504 if (tsk->connected) {
497 buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 505 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
498 SHORT_H_SIZE, 0, dnode, tipc_own_addr, 506 SHORT_H_SIZE, 0, dnode, tipc_own_addr,
499 tsk_peer_port(tsk), 507 tsk_peer_port(tsk),
500 tsk->ref, TIPC_ERR_NO_PORT); 508 tsk->ref, TIPC_ERR_NO_PORT);
501 if (buf) 509 if (skb)
502 tipc_link_xmit(buf, dnode, tsk->ref); 510 tipc_link_xmit_skb(skb, dnode, tsk->ref);
503 tipc_node_remove_conn(dnode, tsk->ref); 511 tipc_node_remove_conn(dnode, tsk->ref);
504 } 512 }
505 k_term_timer(&tsk->timer); 513 k_term_timer(&tsk->timer);
@@ -692,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
692 * tipc_sendmcast - send multicast message 700 * tipc_sendmcast - send multicast message
693 * @sock: socket structure 701 * @sock: socket structure
694 * @seq: destination address 702 * @seq: destination address
695 * @iov: message data to send 703 * @msg: message to send
696 * @dsz: total length of message data 704 * @dsz: total length of message data
697 * @timeo: timeout to wait for wakeup 705 * @timeo: timeout to wait for wakeup
698 * 706 *
@@ -700,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
700 * Returns the number of bytes sent on success, or errno 708 * Returns the number of bytes sent on success, or errno
701 */ 709 */
702static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 710static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
703 struct iovec *iov, size_t dsz, long timeo) 711 struct msghdr *msg, size_t dsz, long timeo)
704{ 712{
705 struct sock *sk = sock->sk; 713 struct sock *sk = sock->sk;
706 struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; 714 struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
707 struct sk_buff *buf; 715 struct sk_buff_head head;
708 uint mtu; 716 uint mtu;
709 int rc; 717 int rc;
710 718
@@ -719,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
719 727
720new_mtu: 728new_mtu:
721 mtu = tipc_bclink_get_mtu(); 729 mtu = tipc_bclink_get_mtu();
722 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); 730 __skb_queue_head_init(&head);
731 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
723 if (unlikely(rc < 0)) 732 if (unlikely(rc < 0))
724 return rc; 733 return rc;
725 734
726 do { 735 do {
727 rc = tipc_bclink_xmit(buf); 736 rc = tipc_bclink_xmit(&head);
728 if (likely(rc >= 0)) { 737 if (likely(rc >= 0)) {
729 rc = dsz; 738 rc = dsz;
730 break; 739 break;
@@ -736,7 +745,7 @@ new_mtu:
736 tipc_sk(sk)->link_cong = 1; 745 tipc_sk(sk)->link_cong = 1;
737 rc = tipc_wait_for_sndmsg(sock, &timeo); 746 rc = tipc_wait_for_sndmsg(sock, &timeo);
738 if (rc) 747 if (rc)
739 kfree_skb_list(buf); 748 __skb_queue_purge(&head);
740 } while (!rc); 749 } while (!rc);
741 return rc; 750 return rc;
742} 751}
@@ -818,39 +827,6 @@ exit:
818 return TIPC_OK; 827 return TIPC_OK;
819} 828}
820 829
821/**
822 * dest_name_check - verify user is permitted to send to specified port name
823 * @dest: destination address
824 * @m: descriptor for message to be sent
825 *
826 * Prevents restricted configuration commands from being issued by
827 * unauthorized users.
828 *
829 * Returns 0 if permission is granted, otherwise errno
830 */
831static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
832{
833 struct tipc_cfg_msg_hdr hdr;
834
835 if (unlikely(dest->addrtype == TIPC_ADDR_ID))
836 return 0;
837 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
838 return 0;
839 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
840 return 0;
841 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
842 return -EACCES;
843
844 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
845 return -EMSGSIZE;
846 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
847 return -EFAULT;
848 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
849 return -EACCES;
850
851 return 0;
852}
853
854static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) 830static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
855{ 831{
856 struct sock *sk = sock->sk; 832 struct sock *sk = sock->sk;
@@ -897,13 +873,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
897 struct sock *sk = sock->sk; 873 struct sock *sk = sock->sk;
898 struct tipc_sock *tsk = tipc_sk(sk); 874 struct tipc_sock *tsk = tipc_sk(sk);
899 struct tipc_msg *mhdr = &tsk->phdr; 875 struct tipc_msg *mhdr = &tsk->phdr;
900 struct iovec *iov = m->msg_iov;
901 u32 dnode, dport; 876 u32 dnode, dport;
902 struct sk_buff *buf; 877 struct sk_buff_head head;
878 struct sk_buff *skb;
903 struct tipc_name_seq *seq = &dest->addr.nameseq; 879 struct tipc_name_seq *seq = &dest->addr.nameseq;
904 u32 mtu; 880 u32 mtu;
905 long timeo; 881 long timeo;
906 int rc = -EINVAL; 882 int rc;
907 883
908 if (unlikely(!dest)) 884 if (unlikely(!dest))
909 return -EDESTADDRREQ; 885 return -EDESTADDRREQ;
@@ -936,14 +912,11 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
936 tsk->conn_instance = dest->addr.name.name.instance; 912 tsk->conn_instance = dest->addr.name.name.instance;
937 } 913 }
938 } 914 }
939 rc = dest_name_check(dest, m);
940 if (rc)
941 goto exit;
942 915
943 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 916 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
944 917
945 if (dest->addrtype == TIPC_ADDR_MCAST) { 918 if (dest->addrtype == TIPC_ADDR_MCAST) {
946 rc = tipc_sendmcast(sock, seq, iov, dsz, timeo); 919 rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
947 goto exit; 920 goto exit;
948 } else if (dest->addrtype == TIPC_ADDR_NAME) { 921 } else if (dest->addrtype == TIPC_ADDR_NAME) {
949 u32 type = dest->addr.name.name.type; 922 u32 type = dest->addr.name.name.type;
@@ -974,13 +947,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
974 947
975new_mtu: 948new_mtu:
976 mtu = tipc_node_get_mtu(dnode, tsk->ref); 949 mtu = tipc_node_get_mtu(dnode, tsk->ref);
977 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); 950 __skb_queue_head_init(&head);
951 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
978 if (rc < 0) 952 if (rc < 0)
979 goto exit; 953 goto exit;
980 954
981 do { 955 do {
982 TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; 956 skb = skb_peek(&head);
983 rc = tipc_link_xmit(buf, dnode, tsk->ref); 957 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
958 rc = tipc_link_xmit(&head, dnode, tsk->ref);
984 if (likely(rc >= 0)) { 959 if (likely(rc >= 0)) {
985 if (sock->state != SS_READY) 960 if (sock->state != SS_READY)
986 sock->state = SS_CONNECTING; 961 sock->state = SS_CONNECTING;
@@ -994,7 +969,7 @@ new_mtu:
994 tsk->link_cong = 1; 969 tsk->link_cong = 1;
995 rc = tipc_wait_for_sndmsg(sock, &timeo); 970 rc = tipc_wait_for_sndmsg(sock, &timeo);
996 if (rc) 971 if (rc)
997 kfree_skb_list(buf); 972 __skb_queue_purge(&head);
998 } while (!rc); 973 } while (!rc);
999exit: 974exit:
1000 if (iocb) 975 if (iocb)
@@ -1051,7 +1026,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
1051 struct sock *sk = sock->sk; 1026 struct sock *sk = sock->sk;
1052 struct tipc_sock *tsk = tipc_sk(sk); 1027 struct tipc_sock *tsk = tipc_sk(sk);
1053 struct tipc_msg *mhdr = &tsk->phdr; 1028 struct tipc_msg *mhdr = &tsk->phdr;
1054 struct sk_buff *buf; 1029 struct sk_buff_head head;
1055 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1030 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1056 u32 ref = tsk->ref; 1031 u32 ref = tsk->ref;
1057 int rc = -EINVAL; 1032 int rc = -EINVAL;
@@ -1086,12 +1061,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
1086next: 1061next:
1087 mtu = tsk->max_pkt; 1062 mtu = tsk->max_pkt;
1088 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); 1063 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1089 rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); 1064 __skb_queue_head_init(&head);
1065 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
1090 if (unlikely(rc < 0)) 1066 if (unlikely(rc < 0))
1091 goto exit; 1067 goto exit;
1092 do { 1068 do {
1093 if (likely(!tsk_conn_cong(tsk))) { 1069 if (likely(!tsk_conn_cong(tsk))) {
1094 rc = tipc_link_xmit(buf, dnode, ref); 1070 rc = tipc_link_xmit(&head, dnode, ref);
1095 if (likely(!rc)) { 1071 if (likely(!rc)) {
1096 tsk->sent_unacked++; 1072 tsk->sent_unacked++;
1097 sent += send; 1073 sent += send;
@@ -1109,7 +1085,7 @@ next:
1109 } 1085 }
1110 rc = tipc_wait_for_sndpkt(sock, &timeo); 1086 rc = tipc_wait_for_sndpkt(sock, &timeo);
1111 if (rc) 1087 if (rc)
1112 kfree_skb_list(buf); 1088 __skb_queue_purge(&head);
1113 } while (!rc); 1089 } while (!rc);
1114exit: 1090exit:
1115 if (iocb) 1091 if (iocb)
@@ -1254,20 +1230,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1254 1230
1255static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) 1231static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1256{ 1232{
1257 struct sk_buff *buf = NULL; 1233 struct sk_buff *skb = NULL;
1258 struct tipc_msg *msg; 1234 struct tipc_msg *msg;
1259 u32 peer_port = tsk_peer_port(tsk); 1235 u32 peer_port = tsk_peer_port(tsk);
1260 u32 dnode = tsk_peer_node(tsk); 1236 u32 dnode = tsk_peer_node(tsk);
1261 1237
1262 if (!tsk->connected) 1238 if (!tsk->connected)
1263 return; 1239 return;
1264 buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, 1240 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
1265 tipc_own_addr, peer_port, tsk->ref, TIPC_OK); 1241 tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
1266 if (!buf) 1242 if (!skb)
1267 return; 1243 return;
1268 msg = buf_msg(buf); 1244 msg = buf_msg(skb);
1269 msg_set_msgcnt(msg, ack); 1245 msg_set_msgcnt(msg, ack);
1270 tipc_link_xmit(buf, dnode, msg_link_selector(msg)); 1246 tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
1271} 1247}
1272 1248
1273static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1249static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1372,8 +1348,7 @@ restart:
1372 sz = buf_len; 1348 sz = buf_len;
1373 m->msg_flags |= MSG_TRUNC; 1349 m->msg_flags |= MSG_TRUNC;
1374 } 1350 }
1375 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), 1351 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1376 m->msg_iov, sz);
1377 if (res) 1352 if (res)
1378 goto exit; 1353 goto exit;
1379 res = sz; 1354 res = sz;
@@ -1473,8 +1448,8 @@ restart:
1473 needed = (buf_len - sz_copied); 1448 needed = (buf_len - sz_copied);
1474 sz_to_copy = (sz <= needed) ? sz : needed; 1449 sz_to_copy = (sz <= needed) ? sz : needed;
1475 1450
1476 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, 1451 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
1477 m->msg_iov, sz_to_copy); 1452 m, sz_to_copy);
1478 if (res) 1453 if (res)
1479 goto exit; 1454 goto exit;
1480 1455
@@ -1556,7 +1531,7 @@ static void tipc_data_ready(struct sock *sk)
1556 * @tsk: TIPC socket 1531 * @tsk: TIPC socket
1557 * @msg: message 1532 * @msg: message
1558 * 1533 *
1559 * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise 1534 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
1560 */ 1535 */
1561static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) 1536static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1562{ 1537{
@@ -1723,20 +1698,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1723/** 1698/**
1724 * tipc_backlog_rcv - handle incoming message from backlog queue 1699 * tipc_backlog_rcv - handle incoming message from backlog queue
1725 * @sk: socket 1700 * @sk: socket
1726 * @buf: message 1701 * @skb: message
1727 * 1702 *
1728 * Caller must hold socket lock, but not port lock. 1703 * Caller must hold socket lock, but not port lock.
1729 * 1704 *
1730 * Returns 0 1705 * Returns 0
1731 */ 1706 */
1732static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) 1707static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1733{ 1708{
1734 int rc; 1709 int rc;
1735 u32 onode; 1710 u32 onode;
1736 struct tipc_sock *tsk = tipc_sk(sk); 1711 struct tipc_sock *tsk = tipc_sk(sk);
1737 uint truesize = buf->truesize; 1712 uint truesize = skb->truesize;
1738 1713
1739 rc = filter_rcv(sk, buf); 1714 rc = filter_rcv(sk, skb);
1740 1715
1741 if (likely(!rc)) { 1716 if (likely(!rc)) {
1742 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) 1717 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
@@ -1744,25 +1719,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1744 return 0; 1719 return 0;
1745 } 1720 }
1746 1721
1747 if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc)) 1722 if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
1748 return 0; 1723 return 0;
1749 1724
1750 tipc_link_xmit(buf, onode, 0); 1725 tipc_link_xmit_skb(skb, onode, 0);
1751 1726
1752 return 0; 1727 return 0;
1753} 1728}
1754 1729
1755/** 1730/**
1756 * tipc_sk_rcv - handle incoming message 1731 * tipc_sk_rcv - handle incoming message
1757 * @buf: buffer containing arriving message 1732 * @skb: buffer containing arriving message
1758 * Consumes buffer 1733 * Consumes buffer
1759 * Returns 0 if success, or errno: -EHOSTUNREACH 1734 * Returns 0 if success, or errno: -EHOSTUNREACH
1760 */ 1735 */
1761int tipc_sk_rcv(struct sk_buff *buf) 1736int tipc_sk_rcv(struct sk_buff *skb)
1762{ 1737{
1763 struct tipc_sock *tsk; 1738 struct tipc_sock *tsk;
1764 struct sock *sk; 1739 struct sock *sk;
1765 u32 dport = msg_destport(buf_msg(buf)); 1740 u32 dport = msg_destport(buf_msg(skb));
1766 int rc = TIPC_OK; 1741 int rc = TIPC_OK;
1767 uint limit; 1742 uint limit;
1768 u32 dnode; 1743 u32 dnode;
@@ -1770,7 +1745,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
1770 /* Validate destination and message */ 1745 /* Validate destination and message */
1771 tsk = tipc_sk_get(dport); 1746 tsk = tipc_sk_get(dport);
1772 if (unlikely(!tsk)) { 1747 if (unlikely(!tsk)) {
1773 rc = tipc_msg_eval(buf, &dnode); 1748 rc = tipc_msg_eval(skb, &dnode);
1774 goto exit; 1749 goto exit;
1775 } 1750 }
1776 sk = &tsk->sk; 1751 sk = &tsk->sk;
@@ -1779,12 +1754,12 @@ int tipc_sk_rcv(struct sk_buff *buf)
1779 spin_lock_bh(&sk->sk_lock.slock); 1754 spin_lock_bh(&sk->sk_lock.slock);
1780 1755
1781 if (!sock_owned_by_user(sk)) { 1756 if (!sock_owned_by_user(sk)) {
1782 rc = filter_rcv(sk, buf); 1757 rc = filter_rcv(sk, skb);
1783 } else { 1758 } else {
1784 if (sk->sk_backlog.len == 0) 1759 if (sk->sk_backlog.len == 0)
1785 atomic_set(&tsk->dupl_rcvcnt, 0); 1760 atomic_set(&tsk->dupl_rcvcnt, 0);
1786 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt); 1761 limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
1787 if (sk_add_backlog(sk, buf, limit)) 1762 if (sk_add_backlog(sk, skb, limit))
1788 rc = -TIPC_ERR_OVERLOAD; 1763 rc = -TIPC_ERR_OVERLOAD;
1789 } 1764 }
1790 spin_unlock_bh(&sk->sk_lock.slock); 1765 spin_unlock_bh(&sk->sk_lock.slock);
@@ -1792,10 +1767,10 @@ int tipc_sk_rcv(struct sk_buff *buf)
1792 if (likely(!rc)) 1767 if (likely(!rc))
1793 return 0; 1768 return 0;
1794exit: 1769exit:
1795 if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc)) 1770 if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
1796 return -EHOSTUNREACH; 1771 return -EHOSTUNREACH;
1797 1772
1798 tipc_link_xmit(buf, dnode, 0); 1773 tipc_link_xmit_skb(skb, dnode, 0);
1799 return (rc < 0) ? -EHOSTUNREACH : 0; 1774 return (rc < 0) ? -EHOSTUNREACH : 0;
1800} 1775}
1801 1776
@@ -2053,7 +2028,7 @@ static int tipc_shutdown(struct socket *sock, int how)
2053{ 2028{
2054 struct sock *sk = sock->sk; 2029 struct sock *sk = sock->sk;
2055 struct tipc_sock *tsk = tipc_sk(sk); 2030 struct tipc_sock *tsk = tipc_sk(sk);
2056 struct sk_buff *buf; 2031 struct sk_buff *skb;
2057 u32 dnode; 2032 u32 dnode;
2058 int res; 2033 int res;
2059 2034
@@ -2068,23 +2043,23 @@ static int tipc_shutdown(struct socket *sock, int how)
2068 2043
2069restart: 2044restart:
2070 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ 2045 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2071 buf = __skb_dequeue(&sk->sk_receive_queue); 2046 skb = __skb_dequeue(&sk->sk_receive_queue);
2072 if (buf) { 2047 if (skb) {
2073 if (TIPC_SKB_CB(buf)->handle != NULL) { 2048 if (TIPC_SKB_CB(skb)->handle != NULL) {
2074 kfree_skb(buf); 2049 kfree_skb(skb);
2075 goto restart; 2050 goto restart;
2076 } 2051 }
2077 if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) 2052 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
2078 tipc_link_xmit(buf, dnode, tsk->ref); 2053 tipc_link_xmit_skb(skb, dnode, tsk->ref);
2079 tipc_node_remove_conn(dnode, tsk->ref); 2054 tipc_node_remove_conn(dnode, tsk->ref);
2080 } else { 2055 } else {
2081 dnode = tsk_peer_node(tsk); 2056 dnode = tsk_peer_node(tsk);
2082 buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 2057 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2083 TIPC_CONN_MSG, SHORT_H_SIZE, 2058 TIPC_CONN_MSG, SHORT_H_SIZE,
2084 0, dnode, tipc_own_addr, 2059 0, dnode, tipc_own_addr,
2085 tsk_peer_port(tsk), 2060 tsk_peer_port(tsk),
2086 tsk->ref, TIPC_CONN_SHUTDOWN); 2061 tsk->ref, TIPC_CONN_SHUTDOWN);
2087 tipc_link_xmit(buf, dnode, tsk->ref); 2062 tipc_link_xmit_skb(skb, dnode, tsk->ref);
2088 } 2063 }
2089 tsk->connected = 0; 2064 tsk->connected = 0;
2090 sock->state = SS_DISCONNECTING; 2065 sock->state = SS_DISCONNECTING;
@@ -2113,7 +2088,7 @@ static void tipc_sk_timeout(unsigned long ref)
2113{ 2088{
2114 struct tipc_sock *tsk; 2089 struct tipc_sock *tsk;
2115 struct sock *sk; 2090 struct sock *sk;
2116 struct sk_buff *buf = NULL; 2091 struct sk_buff *skb = NULL;
2117 u32 peer_port, peer_node; 2092 u32 peer_port, peer_node;
2118 2093
2119 tsk = tipc_sk_get(ref); 2094 tsk = tipc_sk_get(ref);
@@ -2131,20 +2106,20 @@ static void tipc_sk_timeout(unsigned long ref)
2131 2106
2132 if (tsk->probing_state == TIPC_CONN_PROBING) { 2107 if (tsk->probing_state == TIPC_CONN_PROBING) {
2133 /* Previous probe not answered -> self abort */ 2108 /* Previous probe not answered -> self abort */
2134 buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 2109 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
2135 SHORT_H_SIZE, 0, tipc_own_addr, 2110 SHORT_H_SIZE, 0, tipc_own_addr,
2136 peer_node, ref, peer_port, 2111 peer_node, ref, peer_port,
2137 TIPC_ERR_NO_PORT); 2112 TIPC_ERR_NO_PORT);
2138 } else { 2113 } else {
2139 buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 2114 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
2140 0, peer_node, tipc_own_addr, 2115 0, peer_node, tipc_own_addr,
2141 peer_port, ref, TIPC_OK); 2116 peer_port, ref, TIPC_OK);
2142 tsk->probing_state = TIPC_CONN_PROBING; 2117 tsk->probing_state = TIPC_CONN_PROBING;
2143 k_start_timer(&tsk->timer, tsk->probing_interval); 2118 k_start_timer(&tsk->timer, tsk->probing_interval);
2144 } 2119 }
2145 bh_unlock_sock(sk); 2120 bh_unlock_sock(sk);
2146 if (buf) 2121 if (skb)
2147 tipc_link_xmit(buf, peer_node, ref); 2122 tipc_link_xmit_skb(skb, peer_node, ref);
2148exit: 2123exit:
2149 tipc_sk_put(tsk); 2124 tipc_sk_put(tsk);
2150} 2125}
@@ -2802,3 +2777,233 @@ void tipc_socket_stop(void)
2802 sock_unregister(tipc_family_ops.family); 2777 sock_unregister(tipc_family_ops.family);
2803 proto_unregister(&tipc_proto); 2778 proto_unregister(&tipc_proto);
2804} 2779}
2780
2781/* Caller should hold socket lock for the passed tipc socket. */
2782static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2783{
2784 u32 peer_node;
2785 u32 peer_port;
2786 struct nlattr *nest;
2787
2788 peer_node = tsk_peer_node(tsk);
2789 peer_port = tsk_peer_port(tsk);
2790
2791 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2792
2793 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2794 goto msg_full;
2795 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2796 goto msg_full;
2797
2798 if (tsk->conn_type != 0) {
2799 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2800 goto msg_full;
2801 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2802 goto msg_full;
2803 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2804 goto msg_full;
2805 }
2806 nla_nest_end(skb, nest);
2807
2808 return 0;
2809
2810msg_full:
2811 nla_nest_cancel(skb, nest);
2812
2813 return -EMSGSIZE;
2814}
2815
2816/* Caller should hold socket lock for the passed tipc socket. */
2817static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2818 struct tipc_sock *tsk)
2819{
2820 int err;
2821 void *hdr;
2822 struct nlattr *attrs;
2823
2824 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2825 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2826 if (!hdr)
2827 goto msg_cancel;
2828
2829 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2830 if (!attrs)
2831 goto genlmsg_cancel;
2832 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
2833 goto attr_msg_cancel;
2834 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
2835 goto attr_msg_cancel;
2836
2837 if (tsk->connected) {
2838 err = __tipc_nl_add_sk_con(skb, tsk);
2839 if (err)
2840 goto attr_msg_cancel;
2841 } else if (!list_empty(&tsk->publications)) {
2842 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2843 goto attr_msg_cancel;
2844 }
2845 nla_nest_end(skb, attrs);
2846 genlmsg_end(skb, hdr);
2847
2848 return 0;
2849
2850attr_msg_cancel:
2851 nla_nest_cancel(skb, attrs);
2852genlmsg_cancel:
2853 genlmsg_cancel(skb, hdr);
2854msg_cancel:
2855 return -EMSGSIZE;
2856}
2857
2858int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859{
2860 int err;
2861 struct tipc_sock *tsk;
2862 u32 prev_ref = cb->args[0];
2863 u32 ref = prev_ref;
2864
2865 tsk = tipc_sk_get_next(&ref);
2866 for (; tsk; tsk = tipc_sk_get_next(&ref)) {
2867 lock_sock(&tsk->sk);
2868 err = __tipc_nl_add_sk(skb, cb, tsk);
2869 release_sock(&tsk->sk);
2870 tipc_sk_put(tsk);
2871 if (err)
2872 break;
2873
2874 prev_ref = ref;
2875 }
2876
2877 cb->args[0] = prev_ref;
2878
2879 return skb->len;
2880}
2881
2882/* Caller should hold socket lock for the passed tipc socket. */
2883static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2884 struct netlink_callback *cb,
2885 struct publication *publ)
2886{
2887 void *hdr;
2888 struct nlattr *attrs;
2889
2890 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2891 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2892 if (!hdr)
2893 goto msg_cancel;
2894
2895 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2896 if (!attrs)
2897 goto genlmsg_cancel;
2898
2899 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2900 goto attr_msg_cancel;
2901 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2902 goto attr_msg_cancel;
2903 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2904 goto attr_msg_cancel;
2905 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2906 goto attr_msg_cancel;
2907
2908 nla_nest_end(skb, attrs);
2909 genlmsg_end(skb, hdr);
2910
2911 return 0;
2912
2913attr_msg_cancel:
2914 nla_nest_cancel(skb, attrs);
2915genlmsg_cancel:
2916 genlmsg_cancel(skb, hdr);
2917msg_cancel:
2918 return -EMSGSIZE;
2919}
2920
2921/* Caller should hold socket lock for the passed tipc socket. */
2922static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2923 struct netlink_callback *cb,
2924 struct tipc_sock *tsk, u32 *last_publ)
2925{
2926 int err;
2927 struct publication *p;
2928
2929 if (*last_publ) {
2930 list_for_each_entry(p, &tsk->publications, pport_list) {
2931 if (p->key == *last_publ)
2932 break;
2933 }
2934 if (p->key != *last_publ) {
2935 /* We never set seq or call nl_dump_check_consistent()
2936 * this means that setting prev_seq here will cause the
2937 * consistence check to fail in the netlink callback
2938 * handler. Resulting in the last NLMSG_DONE message
2939 * having the NLM_F_DUMP_INTR flag set.
2940 */
2941 cb->prev_seq = 1;
2942 *last_publ = 0;
2943 return -EPIPE;
2944 }
2945 } else {
2946 p = list_first_entry(&tsk->publications, struct publication,
2947 pport_list);
2948 }
2949
2950 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2951 err = __tipc_nl_add_sk_publ(skb, cb, p);
2952 if (err) {
2953 *last_publ = p->key;
2954 return err;
2955 }
2956 }
2957 *last_publ = 0;
2958
2959 return 0;
2960}
2961
2962int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2963{
2964 int err;
2965 u32 tsk_ref = cb->args[0];
2966 u32 last_publ = cb->args[1];
2967 u32 done = cb->args[2];
2968 struct tipc_sock *tsk;
2969
2970 if (!tsk_ref) {
2971 struct nlattr **attrs;
2972 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2973
2974 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2975 if (err)
2976 return err;
2977
2978 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2979 attrs[TIPC_NLA_SOCK],
2980 tipc_nl_sock_policy);
2981 if (err)
2982 return err;
2983
2984 if (!sock[TIPC_NLA_SOCK_REF])
2985 return -EINVAL;
2986
2987 tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2988 }
2989
2990 if (done)
2991 return 0;
2992
2993 tsk = tipc_sk_get(tsk_ref);
2994 if (!tsk)
2995 return -EINVAL;
2996
2997 lock_sock(&tsk->sk);
2998 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2999 if (!err)
3000 done = 1;
3001 release_sock(&tsk->sk);
3002 tipc_sk_put(tsk);
3003
3004 cb->args[0] = tsk_ref;
3005 cb->args[1] = last_publ;
3006 cb->args[2] = done;
3007
3008 return skb->len;
3009}