Diffstat (limited to 'net/tipc')
 -rw-r--r--  net/tipc/Kconfig      |   4
 -rw-r--r--  net/tipc/bcast.c      |   1
 -rw-r--r--  net/tipc/name_table.c |   8
 -rw-r--r--  net/tipc/node.c       |   3
 -rw-r--r--  net/tipc/socket.c     | 110
 5 files changed, 58 insertions(+), 68 deletions(-)
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index bc41bd31eadc..4f99600a5fed 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -3,8 +3,8 @@
 #
 
 menuconfig TIPC
-	tristate "The TIPC Protocol (EXPERIMENTAL)"
-	depends on INET && EXPERIMENTAL
+	tristate "The TIPC Protocol"
+	depends on INET
 	---help---
 	  The Transparent Inter Process Communication (TIPC) protocol is
 	  specially designed for intra cluster communication. This protocol
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 54f89f90ac33..2655c9f4ecad 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -774,6 +774,7 @@ void tipc_bclink_init(void)
 	bcl->owner = &bclink->node;
 	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+	spin_lock_init(&bcbearer->bearer.lock);
 	bcl->b_ptr = &bcbearer->bearer;
 	bcl->state = WORKING_WORKING;
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46754779fd3d..24b167914311 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 static struct name_seq *nametbl_find_seq(u32 type)
 {
 	struct hlist_head *seq_head;
-	struct hlist_node *seq_node;
 	struct name_seq *ns;
 
 	seq_head = &table.types[hash(type)];
-	hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+	hlist_for_each_entry(ns, seq_head, ns_list) {
 		if (ns->type == type)
 			return ns;
 	}
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 			u32 type, u32 lowbound, u32 upbound)
 {
 	struct hlist_head *seq_head;
-	struct hlist_node *seq_node;
 	struct name_seq *seq;
 	int all_types;
 	int ret = 0;
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 			upbound = ~0;
 		for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
 			seq_head = &table.types[i];
-			hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+			hlist_for_each_entry(seq, seq_head, ns_list) {
 				ret += nameseq_list(seq, buf + ret, len - ret,
 						    depth, seq->type,
 						    lowbound, upbound, i);
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
 		ret += nametbl_header(buf + ret, len - ret, depth);
 		i = hash(type);
 		seq_head = &table.types[i];
-		hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+		hlist_for_each_entry(seq, seq_head, ns_list) {
 			if (seq->type == type) {
 				ret += nameseq_list(seq, buf + ret, len - ret,
 						    depth, type,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 48f39dd3eae8..6e6c434872e8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr)
 struct tipc_node *tipc_node_find(u32 addr)
 {
 	struct tipc_node *node;
-	struct hlist_node *pos;
 
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
+	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
 		if (node->addr == addr)
 			return node;
 	}
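The name_table.c and node.c hunks above simply track the tree-wide hlist API change that dropped the separate struct hlist_node cursor from the entry iterators. A minimal sketch of the new three-argument form, using a hypothetical structure rather than anything from this patch:

#include <linux/list.h>
#include <linux/types.h>

/* Illustrative only: a made-up hash-chain lookup with the new-style
 * hlist_for_each_entry(); the old form took an extra 'struct hlist_node *'
 * cursor as its second argument.
 */
struct item {
	u32 key;
	struct hlist_node hash;
};

static struct item *item_find(struct hlist_head *head, u32 key)
{
	struct item *it;

	hlist_for_each_entry(it, head, hash) {
		if (it->key == key)
			return it;
	}
	return NULL;
}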
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 9b4e4833a484..515ce38e4f4c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,8 @@
 #define SS_LISTENING	-1	/* socket is listening */
 #define SS_READY	-2	/* socket is connectionless */
 
-#define OVERLOAD_LIMIT_BASE	10000
+#define CONN_OVERLOAD_LIMIT	((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
+				SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 #define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
 
 struct tipc_sock {
@@ -129,19 +130,6 @@ static void advance_rx_queue(struct sock *sk)
 }
 
 /**
- * discard_rx_queue - discard all buffers in socket receive queue
- *
- * Caller must hold socket lock
- */
-static void discard_rx_queue(struct sock *sk)
-{
-	struct sk_buff *buf;
-
-	while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
-		kfree_skb(buf);
-}
-
-/**
  * reject_rx_queue - reject all buffers in socket receive queue
  *
  * Caller must hold socket lock
@@ -215,7 +203,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 
 	sock_init_data(sock, sk);
 	sk->sk_backlog_rcv = backlog_rcv;
-	sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
 	sk->sk_data_ready = tipc_data_ready;
 	sk->sk_write_space = tipc_write_space;
 	tipc_sk(sk)->p = tp_ptr;
@@ -292,7 +279,7 @@ static int release(struct socket *sock)
 	res = tipc_deleteport(tport->ref);
 
 	/* Discard any remaining (connection-based) messages in receive queue */
-	discard_rx_queue(sk);
+	__skb_queue_purge(&sk->sk_receive_queue);
 
 	/* Reject any messages that accumulated in backlog queue */
 	sock->state = SS_DISCONNECTING;
@@ -516,8 +503,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
 	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
 		     (dest->family != AF_TIPC)))
 		return -EINVAL;
-	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX))
+	if (total_len > TIPC_MAX_USER_MSG_SIZE)
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -625,8 +611,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
 	if (unlikely(dest))
 		return send_msg(iocb, sock, m, total_len);
 
-	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX))
+	if (total_len > TIPC_MAX_USER_MSG_SIZE)
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -711,8 +696,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
-	if ((total_len > (unsigned int)INT_MAX) ||
-	    (m->msg_iovlen > (unsigned int)INT_MAX)) {
+	if (total_len > (unsigned int)INT_MAX) {
 		res = -EMSGSIZE;
 		goto exit;
 	}
@@ -806,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
 	if (addr) {
 		addr->family = AF_TIPC;
 		addr->addrtype = TIPC_ADDR_ID;
+		memset(&addr->addr, 0, sizeof(addr->addr));
 		addr->addr.id.ref = msg_origport(msg);
 		addr->addr.id.node = msg_orignode(msg);
 		addr->addr.name.domain = 0;	/* could leave uninitialized */
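The memset() added to set_orig_addr() plugs a potential leak of uninitialised kernel memory: the addr union in struct sockaddr_tipc is larger than the port-id member this function fills in, so without zeroing it the unused bytes would be copied out to user space as-is. A small user-space check of the sizes involved (assumes <linux/tipc.h> is installed; exact sizes may differ by UAPI version):

#include <stdio.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc sa;

	/* The union is sized for its largest member, not for the port id. */
	printf("sizeof(sa.addr)      = %zu\n", sizeof(sa.addr));
	printf("sizeof(sa.addr.id)   = %zu\n", sizeof(sa.addr.id));
	printf("sizeof(sa.addr.name) = %zu\n", sizeof(sa.addr.name));
	return 0;
}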
@@ -920,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
+	/* will be updated in set_orig_addr() if needed */
+	m->msg_namelen = 0;
+
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
@@ -1029,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
+	/* will be updated in set_orig_addr() if needed */
+	m->msg_namelen = 0;
+
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
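In the same vein, both receive paths now clear m->msg_namelen up front, so a message that arrives without a source address reports a zero-length name instead of whatever stale value was there before. A hedged user-space sketch of the consumer side; the socket setup is illustrative, assumes a TIPC-capable kernel, and omits error handling:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc src;
	char payload[4096];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name = &src,
		.msg_namelen = sizeof(src),	/* kernel writes the real length back */
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	int sd = socket(AF_TIPC, SOCK_RDM, 0);

	if (recvmsg(sd, &msg, 0) >= 0 && msg.msg_namelen >= sizeof(src))
		printf("from node 0x%x, ref %u\n",
		       src.addr.id.node, src.addr.id.ref);
	return 0;
}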
@@ -1155,34 +1146,6 @@ static void tipc_data_ready(struct sock *sk, int len)
 }
 
 /**
- * rx_queue_full - determine if receive queue can accept another message
- * @msg: message to be added to queue
- * @queue_size: current size of queue
- * @base: nominal maximum size of queue
- *
- * Returns 1 if queue is unable to accept message, 0 otherwise
- */
-static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
-{
-	u32 threshold;
-	u32 imp = msg_importance(msg);
-
-	if (imp == TIPC_LOW_IMPORTANCE)
-		threshold = base;
-	else if (imp == TIPC_MEDIUM_IMPORTANCE)
-		threshold = base * 2;
-	else if (imp == TIPC_HIGH_IMPORTANCE)
-		threshold = base * 100;
-	else
-		return 0;
-
-	if (msg_connected(msg))
-		threshold *= 4;
-
-	return queue_size >= threshold;
-}
-
-/**
  * filter_connect - Handle all incoming messages for a connection-based socket
  * @tsock: TIPC socket
  * @msg: message
@@ -1260,6 +1223,36 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
 }
 
 /**
+ * rcvbuf_limit - get proper overload limit of socket receive queue
+ * @sk: socket
+ * @buf: message
+ *
+ * For all connection oriented messages, irrespective of importance,
+ * the default overload value (i.e. 67MB) is set as limit.
+ *
+ * For all connectionless messages, by default new queue limits are
+ * as below:
+ *
+ * TIPC_LOW_IMPORTANCE       (5MB)
+ * TIPC_MEDIUM_IMPORTANCE    (10MB)
+ * TIPC_HIGH_IMPORTANCE      (20MB)
+ * TIPC_CRITICAL_IMPORTANCE  (40MB)
+ *
+ * Returns overload limit according to corresponding message importance
+ */
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	unsigned int limit;
+
+	if (msg_connected(msg))
+		limit = CONN_OVERLOAD_LIMIT;
+	else
+		limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
+	return limit;
+}
+
+/**
  * filter_rcv - validate incoming message
  * @sk: socket
  * @buf: message
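For orientation, the figures quoted in the new kernel-doc follow from the definitions earlier in this patch. A rough user-space re-computation; every constant here is an assumption mirroring typical values of that era (TIPC_FLOW_CONTROL_WIN of 512, TIPC_MAX_USER_MSG_SIZE of 66000 bytes, a few hundred bytes of SKB_TRUESIZE() overhead, a default sk_rcvbuf near 160 KB), not something read from a particular kernel:

#include <stdio.h>

#define FLOW_CONTROL_WIN	512ULL		/* assumed TIPC_FLOW_CONTROL_WIN */
#define MAX_USER_MSG_SIZE	66000ULL	/* assumed TIPC_MAX_USER_MSG_SIZE */
#define TRUESIZE_OVERHEAD	576ULL		/* rough per-skb SKB_TRUESIZE() overhead */
#define DEFAULT_SK_RCVBUF	163840ULL	/* roughly sysctl_rmem_default of the era */

int main(void)
{
	/* Connection oriented: one fixed limit, which the kernel-doc above
	 * quotes as roughly 67MB. */
	unsigned long long conn_limit = (FLOW_CONTROL_WIN * 2 + 1) *
					(MAX_USER_MSG_SIZE + TRUESIZE_OVERHEAD);
	static const char *const imp[] = { "LOW", "MEDIUM", "HIGH", "CRITICAL" };
	int i;

	printf("connection oriented : %llu bytes\n", conn_limit);

	/* Connectionless: sk_rcvbuf << (importance + 5) gives the quoted
	 * 5MB/10MB/20MB/40MB ladder. */
	for (i = 0; i < 4; i++)
		printf("%-8s importance : %llu MB\n", imp[i],
		       (DEFAULT_SK_RCVBUF << (i + 5)) >> 20);
	return 0;
}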
@@ -1275,7 +1268,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_msg *msg = buf_msg(buf);
-	u32 recv_q_len;
+	unsigned int limit = rcvbuf_limit(sk, buf);
 	u32 res = TIPC_OK;
 
 	/* Reject message if it is wrong sort of message for socket */
@@ -1292,15 +1285,13 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 	}
 
 	/* Reject message if there isn't room to queue it */
-	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
-	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
-		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
-			return TIPC_ERR_OVERLOAD;
-	}
+	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+		return TIPC_ERR_OVERLOAD;
 
-	/* Enqueue message (finally!) */
+	/* Enqueue message */
 	TIPC_SKB_CB(buf)->handle = 0;
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
+	skb_set_owner_r(buf, sk);
 
 	sk->sk_data_ready(sk, 0);
 	return TIPC_OK;
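The new overload test only holds together because each queued buffer is now charged to the socket: the added skb_set_owner_r() call is what makes sk_rmem_alloc_get() reflect the bytes sitting in the receive queue, and the charge is dropped again when the buffer is freed. A simplified sketch of that accounting, not copied from any kernel tree; real versions also do socket memory-pressure bookkeeping and vary by release:

#include <net/sock.h>
#include <linux/skbuff.h>

/* Approximation of what skb_set_owner_r() provides for this patch: tie the
 * skb to the socket, charge its truesize to sk_rmem_alloc, and arrange for
 * sock_rfree() to uncharge it when the skb is eventually freed.
 */
static inline void skb_set_owner_r_sketch(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}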
@@ -1349,7 +1340,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
+		if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
@@ -1583,6 +1574,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
 	} else {
 		__skb_dequeue(&sk->sk_receive_queue);
 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
+		skb_set_owner_r(buf, new_sk);
 	}
 	release_sock(new_sk);
 
@@ -1637,7 +1629,7 @@
 	case SS_DISCONNECTING:
 
 		/* Discard any unreceived messages */
-		discard_rx_queue(sk);
+		__skb_queue_purge(&sk->sk_receive_queue);
 
 		/* Wake up anyone sleeping in poll */
 		sk->sk_state_change(sk);