Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--	net/tipc/socket.c	494
1 file changed, 280 insertions(+), 214 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fd5f042dbff4..a9622b6cd916 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,8 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
+ * Copyright (c) 2001-2007, 2012 Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,8 @@
 #define SS_LISTENING	-1	/* socket is listening */
 #define SS_READY	-2	/* socket is connectionless */
 
-#define OVERLOAD_LIMIT_BASE	5000
+#define CONN_OVERLOAD_LIMIT	((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
+				  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 #define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
 
 struct tipc_sock {
@@ -62,6 +63,8 @@ struct tipc_sock {
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
+static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_write_space(struct sock *sk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -71,8 +74,6 @@ static struct proto tipc_proto;
 
 static int sockets_enabled;
 
-static atomic_t tipc_queue_size = ATOMIC_INIT(0);
-
 /*
  * Revised TIPC socket locking policy:
  *
@@ -126,22 +127,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
 static void advance_rx_queue(struct sock *sk)
 {
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
-	atomic_dec(&tipc_queue_size);
-}
-
-/**
- * discard_rx_queue - discard all buffers in socket receive queue
- *
- * Caller must hold socket lock
- */
-static void discard_rx_queue(struct sock *sk)
-{
-	struct sk_buff *buf;
-
-	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
-		atomic_dec(&tipc_queue_size);
-		kfree_skb(buf);
-	}
 }
 
 /**
@@ -153,10 +138,8 @@ static void reject_rx_queue(struct sock *sk)
 {
 	struct sk_buff *buf;
 
-	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
+	while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
 		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
-		atomic_dec(&tipc_queue_size);
-	}
 }
 
 /**
@@ -220,7 +203,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 
 	sock_init_data(sock, sk);
 	sk->sk_backlog_rcv = backlog_rcv;
-	sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
+	sk->sk_data_ready = tipc_data_ready;
+	sk->sk_write_space = tipc_write_space;
 	tipc_sk(sk)->p = tp_ptr;
 	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
 
@@ -276,7 +260,6 @@ static int release(struct socket *sock)
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf == NULL)
 			break;
-		atomic_dec(&tipc_queue_size);
 		if (TIPC_SKB_CB(buf)->handle != 0)
 			kfree_skb(buf);
 		else {
@@ -296,7 +279,7 @@ static int release(struct socket *sock)
 	res = tipc_deleteport(tport->ref);
 
 	/* Discard any remaining (connection-based) messages in receive queue */
-	discard_rx_queue(sk);
+	__skb_queue_purge(&sk->sk_receive_queue);
 
 	/* Reject any messages that accumulated in backlog queue */
 	sock->state = SS_DISCONNECTING;
@@ -408,7 +391,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
  * socket state		flags set
  * ------------		---------
  * unconnected		no read flags
- *			no write flags
+ *			POLLOUT if port is not congested
  *
  * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
  *			no write flags
@@ -435,9 +418,13 @@ static unsigned int poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	u32 mask = 0;
 
-	poll_wait(file, sk_sleep(sk), wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 
 	switch ((int)sock->state) {
+	case SS_UNCONNECTED:
+		if (!tipc_sk_port(sk)->congested)
+			mask |= POLLOUT;
+		break;
 	case SS_READY:
 	case SS_CONNECTED:
 		if (!tipc_sk_port(sk)->congested)
@@ -516,8 +503,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
 	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
 		     (dest->family != AF_TIPC)))
 		return -EINVAL;
-	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX))
+	if (total_len > TIPC_MAX_USER_MSG_SIZE)
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -625,8 +611,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
 	if (unlikely(dest))
 		return send_msg(iocb, sock, m, total_len);
 
-	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX))
+	if (total_len > TIPC_MAX_USER_MSG_SIZE)
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -711,8 +696,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
-	if ((total_len > (unsigned int)INT_MAX) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX)) {
+	if (total_len > (unsigned int)INT_MAX) {
 		res = -EMSGSIZE;
 		goto exit;
 	}
@@ -775,16 +759,19 @@ exit:
 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
 {
 	struct tipc_sock *tsock = tipc_sk(sock->sk);
-
-	if (msg_errcode(msg)) {
-		sock->state = SS_DISCONNECTING;
-		return -ECONNREFUSED;
-	}
+	struct tipc_port *p_ptr;
 
 	tsock->peer_name.ref = msg_origport(msg);
 	tsock->peer_name.node = msg_orignode(msg);
-	tipc_connect2port(tsock->p->ref, &tsock->peer_name);
-	tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+	p_ptr = tipc_port_deref(tsock->p->ref);
+	if (!p_ptr)
+		return -EINVAL;
+
+	__tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
+
+	if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
+		return -EINVAL;
+	msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
 	sock->state = SS_CONNECTED;
 	return 0;
 }
@@ -943,13 +930,6 @@ restart:
 	sz = msg_data_sz(msg);
 	err = msg_errcode(msg);
 
-	/* Complete connection setup for an implied connect */
-	if (unlikely(sock->state == SS_CONNECTING)) {
-		res = auto_connect(sock, msg);
-		if (res)
-			goto exit;
-	}
-
 	/* Discard an empty non-errored message & try again */
 	if ((!sz) && (!err)) {
 		advance_rx_queue(sk);
@@ -1126,31 +1106,143 @@ exit:
 }
 
 /**
- * rx_queue_full - determine if receive queue can accept another message
- * @msg: message to be added to queue
- * @queue_size: current size of queue
- * @base: nominal maximum size of queue
+ * tipc_write_space - wake up thread if port congestion is released
+ * @sk: socket
+ */
+static void tipc_write_space(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+						POLLWRNORM | POLLWRBAND);
+	rcu_read_unlock();
+}
+
+/**
+ * tipc_data_ready - wake up threads to indicate messages have been received
+ * @sk: socket
+ * @len: the length of messages
+ */
+static void tipc_data_ready(struct sock *sk, int len)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+						POLLRDNORM | POLLRDBAND);
+	rcu_read_unlock();
+}
+
+/**
+ * filter_connect - Handle all incoming messages for a connection-based socket
+ * @tsock: TIPC socket
+ * @msg: message
  *
- * Returns 1 if queue is unable to accept message, 0 otherwise
+ * Returns TIPC error status code and socket error status code
+ * once it encounters some errors
  */
-static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
+static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
 {
-	u32 threshold;
-	u32 imp = msg_importance(msg);
-
-	if (imp == TIPC_LOW_IMPORTANCE)
-		threshold = base;
-	else if (imp == TIPC_MEDIUM_IMPORTANCE)
-		threshold = base * 2;
-	else if (imp == TIPC_HIGH_IMPORTANCE)
-		threshold = base * 100;
-	else
-		return 0;
+	struct socket *sock = tsock->sk.sk_socket;
+	struct tipc_msg *msg = buf_msg(*buf);
+	struct sock *sk = &tsock->sk;
+	u32 retval = TIPC_ERR_NO_PORT;
+	int res;
 
-	if (msg_connected(msg))
-		threshold *= 4;
+	if (msg_mcast(msg))
+		return retval;
 
-	return queue_size >= threshold;
+	switch ((int)sock->state) {
+	case SS_CONNECTED:
+		/* Accept only connection-based messages sent by peer */
+		if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
+			if (unlikely(msg_errcode(msg))) {
+				sock->state = SS_DISCONNECTING;
+				__tipc_disconnect(tsock->p);
+			}
+			retval = TIPC_OK;
+		}
+		break;
+	case SS_CONNECTING:
+		/* Accept only ACK or NACK message */
+		if (unlikely(msg_errcode(msg))) {
+			sock->state = SS_DISCONNECTING;
+			sk->sk_err = -ECONNREFUSED;
+			retval = TIPC_OK;
+			break;
+		}
+
+		if (unlikely(!msg_connected(msg)))
+			break;
+
+		res = auto_connect(sock, msg);
+		if (res) {
+			sock->state = SS_DISCONNECTING;
+			sk->sk_err = res;
+			retval = TIPC_OK;
+			break;
+		}
+
+		/* If an incoming message is an 'ACK-', it should be
+		 * discarded here because it doesn't contain useful
+		 * data. In addition, we should try to wake up
+		 * connect() routine if sleeping.
+		 */
+		if (msg_data_sz(msg) == 0) {
+			kfree_skb(*buf);
+			*buf = NULL;
+			if (waitqueue_active(sk_sleep(sk)))
+				wake_up_interruptible(sk_sleep(sk));
+		}
+		retval = TIPC_OK;
+		break;
+	case SS_LISTENING:
+	case SS_UNCONNECTED:
+		/* Accept only SYN message */
+		if (!msg_connected(msg) && !(msg_errcode(msg)))
+			retval = TIPC_OK;
+		break;
+	case SS_DISCONNECTING:
+		break;
+	default:
+		pr_err("Unknown socket state %u\n", sock->state);
+	}
+	return retval;
+}
+
+/**
+ * rcvbuf_limit - get proper overload limit of socket receive queue
+ * @sk: socket
+ * @buf: message
+ *
+ * For all connection oriented messages, irrespective of importance,
+ * the default overload value (i.e. 67MB) is set as limit.
+ *
+ * For all connectionless messages, by default new queue limits are
+ * as belows:
+ *
+ * TIPC_LOW_IMPORTANCE       (5MB)
+ * TIPC_MEDIUM_IMPORTANCE    (10MB)
+ * TIPC_HIGH_IMPORTANCE      (20MB)
+ * TIPC_CRITICAL_IMPORTANCE  (40MB)
+ *
+ * Returns overload limit according to corresponding message importance
+ */
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	unsigned int limit;
+
+	if (msg_connected(msg))
+		limit = CONN_OVERLOAD_LIMIT;
+	else
+		limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
+	return limit;
 }
 
 /**
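
A quick worked example (illustrative only, not part of the commit) of where the limits quoted in the rcvbuf_limit() comment come from, assuming the default sk_rcvbuf of 163840 bytes, TIPC_FLOW_CONTROL_WIN of 512 and TIPC_MAX_USER_MSG_SIZE of 66000 in kernels of this era:

#include <stdio.h>

int main(void)
{
	/* Assumptions: default sk_rcvbuf of 163840 bytes, and importance
	 * values 0..3 for LOW..CRITICAL. */
	unsigned int sk_rcvbuf = 163840;
	const char *imp[] = { "LOW", "MEDIUM", "HIGH", "CRITICAL" };
	int i;

	/* Connectionless traffic: limit = sk->sk_rcvbuf << (importance + 5) */
	for (i = 0; i < 4; i++)
		printf("TIPC_%s_IMPORTANCE: %u MB\n", imp[i],
		       (sk_rcvbuf << (i + 5)) >> 20);

	/* Prints roughly 5, 10, 20 and 40 MB, matching the comment above.
	 * Connection oriented traffic instead uses CONN_OVERLOAD_LIMIT =
	 * (TIPC_FLOW_CONTROL_WIN * 2 + 1) * SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)
	 * = 1025 * SKB_TRUESIZE(66000), on the order of the 67MB quoted. */
	return 0;
}
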
@@ -1169,7 +1261,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_msg *msg = buf_msg(buf);
-	u32 recv_q_len;
+	unsigned int limit = rcvbuf_limit(sk, buf);
+	u32 res = TIPC_OK;
 
 	/* Reject message if it is wrong sort of message for socket */
 	if (msg_type(msg) > TIPC_DIRECT_MSG)
@@ -1179,51 +1272,21 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 		if (msg_connected(msg))
 			return TIPC_ERR_NO_PORT;
 	} else {
-		if (msg_mcast(msg))
-			return TIPC_ERR_NO_PORT;
-		if (sock->state == SS_CONNECTED) {
-			if (!msg_connected(msg) ||
-			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_CONNECTING) {
-			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_LISTENING) {
-			if (msg_connected(msg) || msg_errcode(msg))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_DISCONNECTING) {
-			return TIPC_ERR_NO_PORT;
-		} else /* (sock->state == SS_UNCONNECTED) */ {
-			if (msg_connected(msg) || msg_errcode(msg))
-				return TIPC_ERR_NO_PORT;
-		}
+		res = filter_connect(tipc_sk(sk), &buf);
+		if (res != TIPC_OK || buf == NULL)
+			return res;
 	}
 
 	/* Reject message if there isn't room to queue it */
-	recv_q_len = (u32)atomic_read(&tipc_queue_size);
-	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
-		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
-			return TIPC_ERR_OVERLOAD;
-	}
-	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
-	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
-		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
-			return TIPC_ERR_OVERLOAD;
-	}
+	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+		return TIPC_ERR_OVERLOAD;
 
-	/* Enqueue message (finally!) */
+	/* Enqueue message */
 	TIPC_SKB_CB(buf)->handle = 0;
-	atomic_inc(&tipc_queue_size);
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
+	skb_set_owner_r(buf, sk);
 
-	/* Initiate connection termination for an incoming 'FIN' */
-	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
-		sock->state = SS_DISCONNECTING;
-		tipc_disconnect_port(tipc_sk_port(sk));
-	}
-
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_data_ready(sk, 0);
 	return TIPC_OK;
 }
 
@@ -1270,7 +1333,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
+		if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
@@ -1290,8 +1353,7 @@ static void wakeupdispatch(struct tipc_port *tport)
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
 
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_write_space(sk);
 }
 
 /**
@@ -1309,8 +1371,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 	struct sock *sk = sock->sk;
 	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
 	struct msghdr m = {NULL,};
-	struct sk_buff *buf;
-	struct tipc_msg *msg;
 	unsigned int timeout;
 	int res;
 
@@ -1322,26 +1382,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 		goto exit;
 	}
 
-	/* For now, TIPC does not support the non-blocking form of connect() */
-	if (flags & O_NONBLOCK) {
-		res = -EOPNOTSUPP;
-		goto exit;
-	}
-
-	/* Issue Posix-compliant error code if socket is in the wrong state */
-	if (sock->state == SS_LISTENING) {
-		res = -EOPNOTSUPP;
-		goto exit;
-	}
-	if (sock->state == SS_CONNECTING) {
-		res = -EALREADY;
-		goto exit;
-	}
-	if (sock->state != SS_UNCONNECTED) {
-		res = -EISCONN;
-		goto exit;
-	}
-
 	/*
 	 * Reject connection attempt using multicast address
 	 *
@@ -1353,49 +1393,66 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 		goto exit;
 	}
 
-	/* Reject any messages already in receive queue (very unlikely) */
-	reject_rx_queue(sk);
+	timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
+
+	switch (sock->state) {
+	case SS_UNCONNECTED:
+		/* Send a 'SYN-' to destination */
+		m.msg_name = dest;
+		m.msg_namelen = destlen;
+
+		/* If connect is in non-blocking case, set MSG_DONTWAIT to
+		 * indicate send_msg() is never blocked.
+		 */
+		if (!timeout)
+			m.msg_flags = MSG_DONTWAIT;
 
-	/* Send a 'SYN-' to destination */
-	m.msg_name = dest;
-	m.msg_namelen = destlen;
-	res = send_msg(NULL, sock, &m, 0);
-	if (res < 0)
+		res = send_msg(NULL, sock, &m, 0);
+		if ((res < 0) && (res != -EWOULDBLOCK))
+			goto exit;
+
+		/* Just entered SS_CONNECTING state; the only
+		 * difference is that return value in non-blocking
+		 * case is EINPROGRESS, rather than EALREADY.
+		 */
+		res = -EINPROGRESS;
+		break;
+	case SS_CONNECTING:
+		res = -EALREADY;
+		break;
+	case SS_CONNECTED:
+		res = -EISCONN;
+		break;
+	default:
+		res = -EINVAL;
 		goto exit;
+	}
 
-	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
-	timeout = tipc_sk(sk)->conn_timeout;
-	release_sock(sk);
-	res = wait_event_interruptible_timeout(*sk_sleep(sk),
-			(!skb_queue_empty(&sk->sk_receive_queue) ||
-			(sock->state != SS_CONNECTING)),
-			timeout ? (long)msecs_to_jiffies(timeout)
-				: MAX_SCHEDULE_TIMEOUT);
-	lock_sock(sk);
+	if (sock->state == SS_CONNECTING) {
+		if (!timeout)
+			goto exit;
 
-	if (res > 0) {
-		buf = skb_peek(&sk->sk_receive_queue);
-		if (buf != NULL) {
-			msg = buf_msg(buf);
-			res = auto_connect(sock, msg);
-			if (!res) {
-				if (!msg_data_sz(msg))
-					advance_rx_queue(sk);
-			}
-		} else {
-			if (sock->state == SS_CONNECTED)
-				res = -EISCONN;
+		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+		release_sock(sk);
+		res = wait_event_interruptible_timeout(*sk_sleep(sk),
+				sock->state != SS_CONNECTING,
+				timeout ? (long)msecs_to_jiffies(timeout)
+					: MAX_SCHEDULE_TIMEOUT);
+		lock_sock(sk);
+		if (res <= 0) {
+			if (res == 0)
+				res = -ETIMEDOUT;
 			else
-				res = -ECONNREFUSED;
+				; /* leave "res" unchanged */
+			goto exit;
 		}
-	} else {
-		if (res == 0)
-			res = -ETIMEDOUT;
-		else
-			; /* leave "res" unchanged */
-		sock->state = SS_DISCONNECTING;
 	}
 
+	if (unlikely(sock->state == SS_DISCONNECTING))
+		res = sock_error(sk);
+	else
+		res = 0;
+
 exit:
 	release_sock(sk);
 	return res;
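
For reference, an illustrative user-space sketch (not part of this commit) of the connect() behaviour introduced above: a non-blocking connect on an AF_TIPC stream socket now returns -1 with errno EINPROGRESS instead of EOPNOTSUPP, and completion can be awaited with poll(), which reports POLLOUT once the socket leaves SS_CONNECTING. The service type/instance are arbitrary example values, and the 8000 ms poll timeout simply mirrors CONN_TIMEOUT_DEFAULT.

#include <linux/tipc.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

/* Illustrative only; field values are arbitrary example assumptions. */
static int tipc_connect_nonblocking(__u32 type, __u32 instance)
{
	struct sockaddr_tipc peer;
	struct pollfd pfd;
	int sd;

	memset(&peer, 0, sizeof(peer));
	peer.family = AF_TIPC;
	peer.addrtype = TIPC_ADDR_NAME;
	peer.addr.name.name.type = type;
	peer.addr.name.name.instance = instance;
	peer.addr.name.domain = 0;	/* look the name up cluster-wide */

	sd = socket(AF_TIPC, SOCK_STREAM, 0);
	if (sd < 0)
		return -1;
	fcntl(sd, F_SETFL, fcntl(sd, F_GETFL, 0) | O_NONBLOCK);

	/* With this patch the kernel answers -EINPROGRESS instead of
	 * rejecting non-blocking connects with -EOPNOTSUPP. */
	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
	    errno != EINPROGRESS) {
		close(sd);
		return -1;
	}

	/* POLLOUT is reported once the socket leaves SS_CONNECTING
	 * (see the reworked poll() earlier in this diff); a refused
	 * connection can then be read back with SO_ERROR. */
	pfd.fd = sd;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, 8000) <= 0 || !(pfd.revents & POLLOUT)) {
		close(sd);
		return -1;
	}
	return sd;
}
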
@@ -1436,8 +1493,13 @@ static int listen(struct socket *sock, int len)
  */
 static int accept(struct socket *sock, struct socket *new_sock, int flags)
 {
-	struct sock *sk = sock->sk;
+	struct sock *new_sk, *sk = sock->sk;
 	struct sk_buff *buf;
+	struct tipc_sock *new_tsock;
+	struct tipc_port *new_tport;
+	struct tipc_msg *msg;
+	u32 new_ref;
+
 	int res;
 
 	lock_sock(sk);
@@ -1463,48 +1525,52 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
 	buf = skb_peek(&sk->sk_receive_queue);
 
 	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
-	if (!res) {
-		struct sock *new_sk = new_sock->sk;
-		struct tipc_sock *new_tsock = tipc_sk(new_sk);
-		struct tipc_port *new_tport = new_tsock->p;
-		u32 new_ref = new_tport->ref;
-		struct tipc_msg *msg = buf_msg(buf);
-
-		lock_sock(new_sk);
-
-		/*
-		 * Reject any stray messages received by new socket
-		 * before the socket lock was taken (very, very unlikely)
-		 */
-		reject_rx_queue(new_sk);
-
-		/* Connect new socket to it's peer */
-		new_tsock->peer_name.ref = msg_origport(msg);
-		new_tsock->peer_name.node = msg_orignode(msg);
-		tipc_connect2port(new_ref, &new_tsock->peer_name);
-		new_sock->state = SS_CONNECTED;
-
-		tipc_set_portimportance(new_ref, msg_importance(msg));
-		if (msg_named(msg)) {
-			new_tport->conn_type = msg_nametype(msg);
-			new_tport->conn_instance = msg_nameinst(msg);
-		}
+	if (res)
+		goto exit;
 
-		/*
-		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
-		 * Respond to 'SYN+' by queuing it on new socket.
-		 */
-		if (!msg_data_sz(msg)) {
-			struct msghdr m = {NULL,};
+	new_sk = new_sock->sk;
+	new_tsock = tipc_sk(new_sk);
+	new_tport = new_tsock->p;
+	new_ref = new_tport->ref;
+	msg = buf_msg(buf);
 
-			advance_rx_queue(sk);
-			send_packet(NULL, new_sock, &m, 0);
-		} else {
-			__skb_dequeue(&sk->sk_receive_queue);
-			__skb_queue_head(&new_sk->sk_receive_queue, buf);
-		}
-		release_sock(new_sk);
+	/* we lock on new_sk; but lockdep sees the lock on sk */
+	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * Reject any stray messages received by new socket
+	 * before the socket lock was taken (very, very unlikely)
+	 */
+	reject_rx_queue(new_sk);
+
+	/* Connect new socket to it's peer */
+	new_tsock->peer_name.ref = msg_origport(msg);
+	new_tsock->peer_name.node = msg_orignode(msg);
+	tipc_connect(new_ref, &new_tsock->peer_name);
+	new_sock->state = SS_CONNECTED;
+
+	tipc_set_portimportance(new_ref, msg_importance(msg));
+	if (msg_named(msg)) {
+		new_tport->conn_type = msg_nametype(msg);
+		new_tport->conn_instance = msg_nameinst(msg);
+	}
+
+	/*
+	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
+	 * Respond to 'SYN+' by queuing it on new socket.
+	 */
+	if (!msg_data_sz(msg)) {
+		struct msghdr m = {NULL,};
+
+		advance_rx_queue(sk);
+		send_packet(NULL, new_sock, &m, 0);
+	} else {
+		__skb_dequeue(&sk->sk_receive_queue);
+		__skb_queue_head(&new_sk->sk_receive_queue, buf);
+		skb_set_owner_r(buf, new_sk);
 	}
+	release_sock(new_sk);
+
 exit:
 	release_sock(sk);
 	return res;
@@ -1539,7 +1605,6 @@ restart:
 		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf) {
-			atomic_dec(&tipc_queue_size);
 			if (TIPC_SKB_CB(buf)->handle != 0) {
 				kfree_skb(buf);
 				goto restart;
@@ -1556,10 +1621,11 @@ restart:
 
 	case SS_DISCONNECTING:
 
-		/* Discard any unreceived messages; wake up sleeping tasks */
-		discard_rx_queue(sk);
-		if (waitqueue_active(sk_sleep(sk)))
-			wake_up_interruptible(sk_sleep(sk));
+		/* Discard any unreceived messages */
+		__skb_queue_purge(&sk->sk_receive_queue);
+
+		/* Wake up anyone sleeping in poll */
+		sk->sk_state_change(sk);
 		res = 0;
 		break;
 
@@ -1677,7 +1743,7 @@ static int getsockopt(struct socket *sock,
 		/* no need to set "res", since already 0 at this point */
 		break;
 	case TIPC_NODE_RECVQ_DEPTH:
-		value = (u32)atomic_read(&tipc_queue_size);
+		value = 0; /* was tipc_queue_size, now obsolete */
 		break;
 	case TIPC_SOCK_RECVQ_DEPTH:
 		value = skb_queue_len(&sk->sk_receive_queue);
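
A small illustrative snippet (not part of the commit) of the user-visible effect of dropping tipc_queue_size: TIPC_NODE_RECVQ_DEPTH now always reads back 0, so applications that want a depth figure should query the per-socket TIPC_SOCK_RECVQ_DEPTH instead. SOL_TIPC (271) is assumed to come from linux/socket.h when the libc headers do not define it.

#include <linux/tipc.h>
#include <sys/socket.h>
#include <stdio.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271		/* from include/linux/socket.h */
#endif

/* Illustrative only; "sd" is assumed to be an open AF_TIPC socket. */
static void print_rx_depth(int sd)
{
	__u32 node_depth = 0, sock_depth = 0;
	socklen_t len = sizeof(node_depth);

	/* Always 0 after this patch; the option is kept only for
	 * backward compatibility. */
	getsockopt(sd, SOL_TIPC, TIPC_NODE_RECVQ_DEPTH, &node_depth, &len);

	/* Still meaningful: number of buffers in this socket's rx queue. */
	len = sizeof(sock_depth);
	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &sock_depth, &len);

	printf("node rx depth %u, socket rx depth %u\n",
	       node_depth, sock_depth);
}
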