author	Ingo Molnar <mingo@kernel.org>	2013-01-24 06:47:48 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-01-24 06:47:48 -0500
commit	befddb21c845f8fb49e637997891ef97c6a869dc (patch)
tree	0e7629123184f2dd50291ad6d477b894175f0f26 /net/tipc/socket.c
parent	e716efde75267eab919cdb2bef5b2cb77f305326 (diff)
parent	7d1f9aeff1ee4a20b1aeb377dd0f579fe9647619 (diff)
Merge tag 'v3.8-rc4' into irq/core
Merge Linux 3.8-rc4 before pulling in new commits - we were on an old v3.7 base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--	net/tipc/socket.c	411
1 file changed, 246 insertions(+), 165 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fd5f042dbff4..9b4e4833a484 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,8 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
+ * Copyright (c) 2001-2007, 2012 Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
 #define SS_LISTENING	-1	/* socket is listening */
 #define SS_READY	-2	/* socket is connectionless */
 
-#define OVERLOAD_LIMIT_BASE	5000
+#define OVERLOAD_LIMIT_BASE	10000
 #define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
 
 struct tipc_sock {
@@ -62,6 +62,8 @@ struct tipc_sock {
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
+static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_write_space(struct sock *sk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -71,8 +73,6 @@ static struct proto tipc_proto;
 
 static int sockets_enabled;
 
-static atomic_t tipc_queue_size = ATOMIC_INIT(0);
-
 /*
  * Revised TIPC socket locking policy:
  *
@@ -126,7 +126,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
 static void advance_rx_queue(struct sock *sk)
 {
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
-	atomic_dec(&tipc_queue_size);
 }
 
 /**
@@ -138,10 +137,8 @@ static void discard_rx_queue(struct sock *sk)
 {
 	struct sk_buff *buf;
 
-	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
-		atomic_dec(&tipc_queue_size);
+	while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
 		kfree_skb(buf);
-	}
 }
 
 /**
@@ -153,10 +150,8 @@ static void reject_rx_queue(struct sock *sk)
 {
 	struct sk_buff *buf;
 
-	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
+	while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
 		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
-		atomic_dec(&tipc_queue_size);
-	}
 }
 
 /**
@@ -221,6 +216,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 	sock_init_data(sock, sk);
 	sk->sk_backlog_rcv = backlog_rcv;
 	sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
+	sk->sk_data_ready = tipc_data_ready;
+	sk->sk_write_space = tipc_write_space;
 	tipc_sk(sk)->p = tp_ptr;
 	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
 
@@ -276,7 +273,6 @@ static int release(struct socket *sock)
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf == NULL)
 			break;
-		atomic_dec(&tipc_queue_size);
 		if (TIPC_SKB_CB(buf)->handle != 0)
 			kfree_skb(buf);
 		else {
@@ -408,7 +404,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
  * socket state		flags set
  * ------------		---------
  * unconnected		no read flags
- *			no write flags
+ *			POLLOUT if port is not congested
  *
  * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
  *			no write flags
@@ -435,9 +431,13 @@ static unsigned int poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	u32 mask = 0;
 
-	poll_wait(file, sk_sleep(sk), wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 
 	switch ((int)sock->state) {
+	case SS_UNCONNECTED:
+		if (!tipc_sk_port(sk)->congested)
+			mask |= POLLOUT;
+		break;
 	case SS_READY:
 	case SS_CONNECTED:
 		if (!tipc_sk_port(sk)->congested)
@@ -775,16 +775,19 @@ exit:
 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
 {
 	struct tipc_sock *tsock = tipc_sk(sock->sk);
-
-	if (msg_errcode(msg)) {
-		sock->state = SS_DISCONNECTING;
-		return -ECONNREFUSED;
-	}
+	struct tipc_port *p_ptr;
 
 	tsock->peer_name.ref = msg_origport(msg);
 	tsock->peer_name.node = msg_orignode(msg);
-	tipc_connect2port(tsock->p->ref, &tsock->peer_name);
-	tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+	p_ptr = tipc_port_deref(tsock->p->ref);
+	if (!p_ptr)
+		return -EINVAL;
+
+	__tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
+
+	if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
+		return -EINVAL;
+	msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
 	sock->state = SS_CONNECTED;
 	return 0;
 }
@@ -943,13 +946,6 @@ restart:
 	sz = msg_data_sz(msg);
 	err = msg_errcode(msg);
 
-	/* Complete connection setup for an implied connect */
-	if (unlikely(sock->state == SS_CONNECTING)) {
-		res = auto_connect(sock, msg);
-		if (res)
-			goto exit;
-	}
-
 	/* Discard an empty non-errored message & try again */
 	if ((!sz) && (!err)) {
 		advance_rx_queue(sk);
@@ -1126,6 +1122,39 @@ exit:
 }
 
 /**
+ * tipc_write_space - wake up thread if port congestion is released
+ * @sk: socket
+ */
+static void tipc_write_space(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+						POLLWRNORM | POLLWRBAND);
+	rcu_read_unlock();
+}
+
+/**
+ * tipc_data_ready - wake up threads to indicate messages have been received
+ * @sk: socket
+ * @len: the length of messages
+ */
+static void tipc_data_ready(struct sock *sk, int len)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+						POLLRDNORM | POLLRDBAND);
+	rcu_read_unlock();
+}
+
+/**
  * rx_queue_full - determine if receive queue can accept another message
  * @msg: message to be added to queue
  * @queue_size: current size of queue
@@ -1154,6 +1183,83 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
 }
 
 /**
+ * filter_connect - Handle all incoming messages for a connection-based socket
+ * @tsock: TIPC socket
+ * @msg: message
+ *
+ * Returns TIPC error status code and socket error status code
+ * once it encounters some errors
+ */
+static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
+{
+	struct socket *sock = tsock->sk.sk_socket;
+	struct tipc_msg *msg = buf_msg(*buf);
+	struct sock *sk = &tsock->sk;
+	u32 retval = TIPC_ERR_NO_PORT;
+	int res;
+
+	if (msg_mcast(msg))
+		return retval;
+
+	switch ((int)sock->state) {
+	case SS_CONNECTED:
+		/* Accept only connection-based messages sent by peer */
+		if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
+			if (unlikely(msg_errcode(msg))) {
+				sock->state = SS_DISCONNECTING;
+				__tipc_disconnect(tsock->p);
+			}
+			retval = TIPC_OK;
+		}
+		break;
+	case SS_CONNECTING:
+		/* Accept only ACK or NACK message */
+		if (unlikely(msg_errcode(msg))) {
+			sock->state = SS_DISCONNECTING;
+			sk->sk_err = -ECONNREFUSED;
+			retval = TIPC_OK;
+			break;
+		}
+
+		if (unlikely(!msg_connected(msg)))
+			break;
+
+		res = auto_connect(sock, msg);
+		if (res) {
+			sock->state = SS_DISCONNECTING;
+			sk->sk_err = res;
+			retval = TIPC_OK;
+			break;
+		}
+
+		/* If an incoming message is an 'ACK-', it should be
+		 * discarded here because it doesn't contain useful
+		 * data. In addition, we should try to wake up
+		 * connect() routine if sleeping.
+		 */
+		if (msg_data_sz(msg) == 0) {
+			kfree_skb(*buf);
+			*buf = NULL;
+			if (waitqueue_active(sk_sleep(sk)))
+				wake_up_interruptible(sk_sleep(sk));
+		}
+		retval = TIPC_OK;
+		break;
+	case SS_LISTENING:
+	case SS_UNCONNECTED:
+		/* Accept only SYN message */
+		if (!msg_connected(msg) && !(msg_errcode(msg)))
+			retval = TIPC_OK;
+		break;
+	case SS_DISCONNECTING:
+		break;
+	default:
+		pr_err("Unknown socket state %u\n", sock->state);
+	}
+	return retval;
+}
+
+/**
  * filter_rcv - validate incoming message
  * @sk: socket
  * @buf: message
@@ -1170,6 +1276,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 	struct socket *sock = sk->sk_socket;
 	struct tipc_msg *msg = buf_msg(buf);
 	u32 recv_q_len;
+	u32 res = TIPC_OK;
 
 	/* Reject message if it is wrong sort of message for socket */
 	if (msg_type(msg) > TIPC_DIRECT_MSG)
@@ -1179,32 +1286,12 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 		if (msg_connected(msg))
 			return TIPC_ERR_NO_PORT;
 	} else {
-		if (msg_mcast(msg))
-			return TIPC_ERR_NO_PORT;
-		if (sock->state == SS_CONNECTED) {
-			if (!msg_connected(msg) ||
-			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_CONNECTING) {
-			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_LISTENING) {
-			if (msg_connected(msg) || msg_errcode(msg))
-				return TIPC_ERR_NO_PORT;
-		} else if (sock->state == SS_DISCONNECTING) {
-			return TIPC_ERR_NO_PORT;
-		} else /* (sock->state == SS_UNCONNECTED) */ {
-			if (msg_connected(msg) || msg_errcode(msg))
-				return TIPC_ERR_NO_PORT;
-		}
+		res = filter_connect(tipc_sk(sk), &buf);
+		if (res != TIPC_OK || buf == NULL)
+			return res;
 	}
 
 	/* Reject message if there isn't room to queue it */
-	recv_q_len = (u32)atomic_read(&tipc_queue_size);
-	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
-		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
-			return TIPC_ERR_OVERLOAD;
-	}
 	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
 	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
 		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
@@ -1213,17 +1300,9 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 
 	/* Enqueue message (finally!) */
 	TIPC_SKB_CB(buf)->handle = 0;
-	atomic_inc(&tipc_queue_size);
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
 
-	/* Initiate connection termination for an incoming 'FIN' */
-	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
-		sock->state = SS_DISCONNECTING;
-		tipc_disconnect_port(tipc_sk_port(sk));
-	}
-
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_data_ready(sk, 0);
 	return TIPC_OK;
 }
 
@@ -1290,8 +1369,7 @@ static void wakeupdispatch(struct tipc_port *tport)
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
 
-	if (waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	sk->sk_write_space(sk);
 }
 
 /**
@@ -1309,8 +1387,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 	struct sock *sk = sock->sk;
 	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
 	struct msghdr m = {NULL,};
-	struct sk_buff *buf;
-	struct tipc_msg *msg;
 	unsigned int timeout;
 	int res;
 
@@ -1322,26 +1398,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 		goto exit;
 	}
 
-	/* For now, TIPC does not support the non-blocking form of connect() */
-	if (flags & O_NONBLOCK) {
-		res = -EOPNOTSUPP;
-		goto exit;
-	}
-
-	/* Issue Posix-compliant error code if socket is in the wrong state */
-	if (sock->state == SS_LISTENING) {
-		res = -EOPNOTSUPP;
-		goto exit;
-	}
-	if (sock->state == SS_CONNECTING) {
-		res = -EALREADY;
-		goto exit;
-	}
-	if (sock->state != SS_UNCONNECTED) {
-		res = -EISCONN;
-		goto exit;
-	}
-
 	/*
 	 * Reject connection attempt using multicast address
 	 *
@@ -1353,49 +1409,66 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 		goto exit;
 	}
 
-	/* Reject any messages already in receive queue (very unlikely) */
-	reject_rx_queue(sk);
+	timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
 
-	/* Send a 'SYN-' to destination */
-	m.msg_name = dest;
-	m.msg_namelen = destlen;
-	res = send_msg(NULL, sock, &m, 0);
-	if (res < 0)
+	switch (sock->state) {
+	case SS_UNCONNECTED:
+		/* Send a 'SYN-' to destination */
+		m.msg_name = dest;
+		m.msg_namelen = destlen;
+
+		/* If connect is in non-blocking case, set MSG_DONTWAIT to
+		 * indicate send_msg() is never blocked.
+		 */
+		if (!timeout)
+			m.msg_flags = MSG_DONTWAIT;
+
+		res = send_msg(NULL, sock, &m, 0);
+		if ((res < 0) && (res != -EWOULDBLOCK))
+			goto exit;
+
+		/* Just entered SS_CONNECTING state; the only
+		 * difference is that return value in non-blocking
+		 * case is EINPROGRESS, rather than EALREADY.
+		 */
+		res = -EINPROGRESS;
+		break;
+	case SS_CONNECTING:
+		res = -EALREADY;
+		break;
+	case SS_CONNECTED:
+		res = -EISCONN;
+		break;
+	default:
+		res = -EINVAL;
 		goto exit;
+	}
 
-	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
-	timeout = tipc_sk(sk)->conn_timeout;
-	release_sock(sk);
-	res = wait_event_interruptible_timeout(*sk_sleep(sk),
-			(!skb_queue_empty(&sk->sk_receive_queue) ||
-			(sock->state != SS_CONNECTING)),
-			timeout ? (long)msecs_to_jiffies(timeout)
-				: MAX_SCHEDULE_TIMEOUT);
-	lock_sock(sk);
+	if (sock->state == SS_CONNECTING) {
+		if (!timeout)
+			goto exit;
 
-	if (res > 0) {
-		buf = skb_peek(&sk->sk_receive_queue);
-		if (buf != NULL) {
-			msg = buf_msg(buf);
-			res = auto_connect(sock, msg);
-			if (!res) {
-				if (!msg_data_sz(msg))
-					advance_rx_queue(sk);
-			}
-		} else {
-			if (sock->state == SS_CONNECTED)
-				res = -EISCONN;
+		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+		release_sock(sk);
+		res = wait_event_interruptible_timeout(*sk_sleep(sk),
+				sock->state != SS_CONNECTING,
+				timeout ? (long)msecs_to_jiffies(timeout)
+				: MAX_SCHEDULE_TIMEOUT);
+		lock_sock(sk);
+		if (res <= 0) {
+			if (res == 0)
+				res = -ETIMEDOUT;
 			else
-				res = -ECONNREFUSED;
+				; /* leave "res" unchanged */
+			goto exit;
 		}
-	} else {
-		if (res == 0)
-			res = -ETIMEDOUT;
-		else
-			; /* leave "res" unchanged */
-		sock->state = SS_DISCONNECTING;
 	}
 
+	if (unlikely(sock->state == SS_DISCONNECTING))
+		res = sock_error(sk);
+	else
+		res = 0;
+
 exit:
 	release_sock(sk);
 	return res;
@@ -1436,8 +1509,13 @@ static int listen(struct socket *sock, int len)
  */
 static int accept(struct socket *sock, struct socket *new_sock, int flags)
 {
-	struct sock *sk = sock->sk;
+	struct sock *new_sk, *sk = sock->sk;
 	struct sk_buff *buf;
+	struct tipc_sock *new_tsock;
+	struct tipc_port *new_tport;
+	struct tipc_msg *msg;
+	u32 new_ref;
+
 	int res;
 
 	lock_sock(sk);
@@ -1463,48 +1541,51 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
 	buf = skb_peek(&sk->sk_receive_queue);
 
 	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
-	if (!res) {
-		struct sock *new_sk = new_sock->sk;
-		struct tipc_sock *new_tsock = tipc_sk(new_sk);
-		struct tipc_port *new_tport = new_tsock->p;
-		u32 new_ref = new_tport->ref;
-		struct tipc_msg *msg = buf_msg(buf);
-
-		lock_sock(new_sk);
-
-		/*
-		 * Reject any stray messages received by new socket
-		 * before the socket lock was taken (very, very unlikely)
-		 */
-		reject_rx_queue(new_sk);
-
-		/* Connect new socket to it's peer */
-		new_tsock->peer_name.ref = msg_origport(msg);
-		new_tsock->peer_name.node = msg_orignode(msg);
-		tipc_connect2port(new_ref, &new_tsock->peer_name);
-		new_sock->state = SS_CONNECTED;
-
-		tipc_set_portimportance(new_ref, msg_importance(msg));
-		if (msg_named(msg)) {
-			new_tport->conn_type = msg_nametype(msg);
-			new_tport->conn_instance = msg_nameinst(msg);
-		}
+	if (res)
+		goto exit;
 
-		/*
-		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
-		 * Respond to 'SYN+' by queuing it on new socket.
-		 */
-		if (!msg_data_sz(msg)) {
-			struct msghdr m = {NULL,};
+	new_sk = new_sock->sk;
+	new_tsock = tipc_sk(new_sk);
+	new_tport = new_tsock->p;
+	new_ref = new_tport->ref;
+	msg = buf_msg(buf);
 
-			advance_rx_queue(sk);
-			send_packet(NULL, new_sock, &m, 0);
-		} else {
-			__skb_dequeue(&sk->sk_receive_queue);
-			__skb_queue_head(&new_sk->sk_receive_queue, buf);
-		}
-		release_sock(new_sk);
+	/* we lock on new_sk; but lockdep sees the lock on sk */
+	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * Reject any stray messages received by new socket
+	 * before the socket lock was taken (very, very unlikely)
+	 */
+	reject_rx_queue(new_sk);
+
+	/* Connect new socket to it's peer */
+	new_tsock->peer_name.ref = msg_origport(msg);
+	new_tsock->peer_name.node = msg_orignode(msg);
+	tipc_connect(new_ref, &new_tsock->peer_name);
+	new_sock->state = SS_CONNECTED;
+
+	tipc_set_portimportance(new_ref, msg_importance(msg));
+	if (msg_named(msg)) {
+		new_tport->conn_type = msg_nametype(msg);
+		new_tport->conn_instance = msg_nameinst(msg);
 	}
+
+	/*
+	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
+	 * Respond to 'SYN+' by queuing it on new socket.
+	 */
+	if (!msg_data_sz(msg)) {
+		struct msghdr m = {NULL,};
+
+		advance_rx_queue(sk);
+		send_packet(NULL, new_sock, &m, 0);
+	} else {
+		__skb_dequeue(&sk->sk_receive_queue);
+		__skb_queue_head(&new_sk->sk_receive_queue, buf);
+	}
+	release_sock(new_sk);
+
 exit:
 	release_sock(sk);
 	return res;
@@ -1539,7 +1620,6 @@ restart:
 	/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 	buf = __skb_dequeue(&sk->sk_receive_queue);
 	if (buf) {
-		atomic_dec(&tipc_queue_size);
 		if (TIPC_SKB_CB(buf)->handle != 0) {
 			kfree_skb(buf);
 			goto restart;
@@ -1556,10 +1636,11 @@ restart:
 
 	case SS_DISCONNECTING:
 
-		/* Discard any unreceived messages; wake up sleeping tasks */
+		/* Discard any unreceived messages */
 		discard_rx_queue(sk);
-		if (waitqueue_active(sk_sleep(sk)))
-			wake_up_interruptible(sk_sleep(sk));
+
+		/* Wake up anyone sleeping in poll */
+		sk->sk_state_change(sk);
 		res = 0;
 		break;
 
@@ -1677,7 +1758,7 @@ static int getsockopt(struct socket *sock,
 		/* no need to set "res", since already 0 at this point */
 		break;
 	case TIPC_NODE_RECVQ_DEPTH:
-		value = (u32)atomic_read(&tipc_queue_size);
+		value = 0; /* was tipc_queue_size, now obsolete */
 		break;
 	case TIPC_SOCK_RECVQ_DEPTH:
 		value = skb_queue_len(&sk->sk_receive_queue);