Diffstat (limited to 'net/tipc')
 -rw-r--r--  net/tipc/Kconfig        |   8
 -rw-r--r--  net/tipc/Makefile       |   1
 -rw-r--r--  net/tipc/addr.c         |   7
 -rw-r--r--  net/tipc/addr.h         |   1
 -rw-r--r--  net/tipc/bcast.c        |  95
 -rw-r--r--  net/tipc/bcast.h        |   4
 -rw-r--r--  net/tipc/bearer.c       |  30
 -rw-r--r--  net/tipc/bearer.h       |  17
 -rw-r--r--  net/tipc/core.c         |   2
 -rw-r--r--  net/tipc/discover.c     |  11
 -rw-r--r--  net/tipc/eth_media.c    |   8
 -rw-r--r--  net/tipc/ib_media.c     |   2
 -rw-r--r--  net/tipc/link.c         | 873
 -rw-r--r--  net/tipc/link.h         |  51
 -rw-r--r--  net/tipc/msg.c          | 130
 -rw-r--r--  net/tipc/msg.h          | 133
 -rw-r--r--  net/tipc/name_distr.c   |   4
 -rw-r--r--  net/tipc/name_table.c   |   4
 -rw-r--r--  net/tipc/node.c         | 116
 -rw-r--r--  net/tipc/node.h         |  18
 -rw-r--r--  net/tipc/server.c       |  42
 -rw-r--r--  net/tipc/socket.c       | 277
 -rw-r--r--  net/tipc/socket.h       |   4
 -rw-r--r--  net/tipc/subscr.c       |  23
 -rw-r--r--  net/tipc/udp_media.c    | 448
 25 files changed, 1331 insertions(+), 978 deletions(-)
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 91c8a8e031db..c25a3a149dc4 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
| @@ -26,3 +26,11 @@ config TIPC_MEDIA_IB | |||
| 26 | help | 26 | help | 
| 27 | Saying Y here will enable support for running TIPC on | 27 | Saying Y here will enable support for running TIPC on | 
| 28 | IP-over-InfiniBand devices. | 28 | IP-over-InfiniBand devices. | 
| 29 | config TIPC_MEDIA_UDP | ||
| 30 | bool "IP/UDP media type support" | ||
| 31 | depends on TIPC | ||
| 32 | select NET_UDP_TUNNEL | ||
| 33 | help | ||
| 34 | Saying Y here will enable support for running TIPC over IP/UDP | ||
| 35 | bool | ||
| 36 | default y | ||
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 599b1a540d2b..57e460be4692 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
| @@ -10,5 +10,6 @@ tipc-y += addr.o bcast.o bearer.o \ | |||
| 10 | netlink.o netlink_compat.o node.o socket.o eth_media.o \ | 10 | netlink.o netlink_compat.o node.o socket.o eth_media.o \ | 
| 11 | server.o socket.o | 11 | server.o socket.o | 
| 12 | 12 | ||
| 13 | tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o | ||
| 13 | tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o | 14 | tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o | 
| 14 | tipc-$(CONFIG_SYSCTL) += sysctl.o | 15 | tipc-$(CONFIG_SYSCTL) += sysctl.o | 
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 48fd3b5a73fb..ba7daa864d44 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
| @@ -38,6 +38,13 @@ | |||
| 38 | #include "addr.h" | 38 | #include "addr.h" | 
| 39 | #include "core.h" | 39 | #include "core.h" | 
| 40 | 40 | ||
| 41 | u32 tipc_own_addr(struct net *net) | ||
| 42 | { | ||
| 43 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
| 44 | |||
| 45 | return tn->own_addr; | ||
| 46 | } | ||
| 47 | |||
| 41 | /** | 48 | /** | 
| 42 | * in_own_cluster - test for cluster inclusion; <0.0.0> always matches | 49 | * in_own_cluster - test for cluster inclusion; <0.0.0> always matches | 
| 43 | */ | 50 | */ | 
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index c700c2d28e09..7ba6d5c8ae40 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
| @@ -55,6 +55,7 @@ static inline u32 tipc_cluster_mask(u32 addr) | |||
| 55 | return addr & TIPC_CLUSTER_MASK; | 55 | return addr & TIPC_CLUSTER_MASK; | 
| 56 | } | 56 | } | 
| 57 | 57 | ||
| 58 | u32 tipc_own_addr(struct net *net); | ||
| 58 | int in_own_cluster(struct net *net, u32 addr); | 59 | int in_own_cluster(struct net *net, u32 addr); | 
| 59 | int in_own_cluster_exact(struct net *net, u32 addr); | 60 | int in_own_cluster_exact(struct net *net, u32 addr); | 
| 60 | int in_own_node(struct net *net, u32 addr); | 61 | int in_own_node(struct net *net, u32 addr); | 
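The new tipc_own_addr() helper lets callers outside addr.c read the per-namespace node address without repeating the net_generic() lookup of struct tipc_net. A minimal illustration of the kind of caller this enables (the helper below is hypothetical, not part of the patch):

    static bool tipc_addr_is_own(struct net *net, u32 addr)
    {
            return addr == tipc_own_addr(net);
    }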
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 3e41704832de..c5cbdcb1f0b5 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
| @@ -62,21 +62,8 @@ static void tipc_bclink_lock(struct net *net) | |||
| 62 | static void tipc_bclink_unlock(struct net *net) | 62 | static void tipc_bclink_unlock(struct net *net) | 
| 63 | { | 63 | { | 
| 64 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 64 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 65 | struct tipc_node *node = NULL; | ||
| 66 | 65 | ||
| 67 | if (likely(!tn->bclink->flags)) { | ||
| 68 | spin_unlock_bh(&tn->bclink->lock); | ||
| 69 | return; | ||
| 70 | } | ||
| 71 | |||
| 72 | if (tn->bclink->flags & TIPC_BCLINK_RESET) { | ||
| 73 | tn->bclink->flags &= ~TIPC_BCLINK_RESET; | ||
| 74 | node = tipc_bclink_retransmit_to(net); | ||
| 75 | } | ||
| 76 | spin_unlock_bh(&tn->bclink->lock); | 66 | spin_unlock_bh(&tn->bclink->lock); | 
| 77 | |||
| 78 | if (node) | ||
| 79 | tipc_link_reset_all(node); | ||
| 80 | } | 67 | } | 
| 81 | 68 | ||
| 82 | void tipc_bclink_input(struct net *net) | 69 | void tipc_bclink_input(struct net *net) | 
| @@ -91,13 +78,6 @@ uint tipc_bclink_get_mtu(void) | |||
| 91 | return MAX_PKT_DEFAULT_MCAST; | 78 | return MAX_PKT_DEFAULT_MCAST; | 
| 92 | } | 79 | } | 
| 93 | 80 | ||
| 94 | void tipc_bclink_set_flags(struct net *net, unsigned int flags) | ||
| 95 | { | ||
| 96 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
| 97 | |||
| 98 | tn->bclink->flags |= flags; | ||
| 99 | } | ||
| 100 | |||
| 101 | static u32 bcbuf_acks(struct sk_buff *buf) | 81 | static u32 bcbuf_acks(struct sk_buff *buf) | 
| 102 | { | 82 | { | 
| 103 | return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; | 83 | return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; | 
| @@ -135,9 +115,10 @@ static void bclink_set_last_sent(struct net *net) | |||
| 135 | { | 115 | { | 
| 136 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 116 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 137 | struct tipc_link *bcl = tn->bcl; | 117 | struct tipc_link *bcl = tn->bcl; | 
| 118 | struct sk_buff *skb = skb_peek(&bcl->backlogq); | ||
| 138 | 119 | ||
| 139 | if (bcl->next_out) | 120 | if (skb) | 
| 140 | bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1); | 121 | bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1); | 
| 141 | else | 122 | else | 
| 142 | bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); | 123 | bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); | 
| 143 | } | 124 | } | 
| @@ -155,7 +136,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) | |||
| 155 | seqno : node->bclink.last_sent; | 136 | seqno : node->bclink.last_sent; | 
| 156 | } | 137 | } | 
| 157 | 138 | ||
| 158 | |||
| 159 | /** | 139 | /** | 
| 160 | * tipc_bclink_retransmit_to - get most recent node to request retransmission | 140 | * tipc_bclink_retransmit_to - get most recent node to request retransmission | 
| 161 | * | 141 | * | 
| @@ -180,7 +160,7 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to) | |||
| 180 | struct sk_buff *skb; | 160 | struct sk_buff *skb; | 
| 181 | struct tipc_link *bcl = tn->bcl; | 161 | struct tipc_link *bcl = tn->bcl; | 
| 182 | 162 | ||
| 183 | skb_queue_walk(&bcl->outqueue, skb) { | 163 | skb_queue_walk(&bcl->transmq, skb) { | 
| 184 | if (more(buf_seqno(skb), after)) { | 164 | if (more(buf_seqno(skb), after)) { | 
| 185 | tipc_link_retransmit(bcl, skb, mod(to - after)); | 165 | tipc_link_retransmit(bcl, skb, mod(to - after)); | 
| 186 | break; | 166 | break; | 
| @@ -210,14 +190,17 @@ void tipc_bclink_wakeup_users(struct net *net) | |||
| 210 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | 190 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | 
| 211 | { | 191 | { | 
| 212 | struct sk_buff *skb, *tmp; | 192 | struct sk_buff *skb, *tmp; | 
| 213 | struct sk_buff *next; | ||
| 214 | unsigned int released = 0; | 193 | unsigned int released = 0; | 
| 215 | struct net *net = n_ptr->net; | 194 | struct net *net = n_ptr->net; | 
| 216 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 195 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 217 | 196 | ||
| 197 | if (unlikely(!n_ptr->bclink.recv_permitted)) | ||
| 198 | return; | ||
| 199 | |||
| 218 | tipc_bclink_lock(net); | 200 | tipc_bclink_lock(net); | 
| 201 | |||
| 219 | /* Bail out if tx queue is empty (no clean up is required) */ | 202 | /* Bail out if tx queue is empty (no clean up is required) */ | 
| 220 | skb = skb_peek(&tn->bcl->outqueue); | 203 | skb = skb_peek(&tn->bcl->transmq); | 
| 221 | if (!skb) | 204 | if (!skb) | 
| 222 | goto exit; | 205 | goto exit; | 
| 223 | 206 | ||
| @@ -244,27 +227,19 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | |||
| 244 | } | 227 | } | 
| 245 | 228 | ||
| 246 | /* Skip over packets that node has previously acknowledged */ | 229 | /* Skip over packets that node has previously acknowledged */ | 
| 247 | skb_queue_walk(&tn->bcl->outqueue, skb) { | 230 | skb_queue_walk(&tn->bcl->transmq, skb) { | 
| 248 | if (more(buf_seqno(skb), n_ptr->bclink.acked)) | 231 | if (more(buf_seqno(skb), n_ptr->bclink.acked)) | 
| 249 | break; | 232 | break; | 
| 250 | } | 233 | } | 
| 251 | 234 | ||
| 252 | /* Update packets that node is now acknowledging */ | 235 | /* Update packets that node is now acknowledging */ | 
| 253 | skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) { | 236 | skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) { | 
| 254 | if (more(buf_seqno(skb), acked)) | 237 | if (more(buf_seqno(skb), acked)) | 
| 255 | break; | 238 | break; | 
| 256 | 239 | bcbuf_decr_acks(skb); | |
| 257 | next = tipc_skb_queue_next(&tn->bcl->outqueue, skb); | 240 | bclink_set_last_sent(net); | 
| 258 | if (skb != tn->bcl->next_out) { | ||
| 259 | bcbuf_decr_acks(skb); | ||
| 260 | } else { | ||
| 261 | bcbuf_set_acks(skb, 0); | ||
| 262 | tn->bcl->next_out = next; | ||
| 263 | bclink_set_last_sent(net); | ||
| 264 | } | ||
| 265 | |||
| 266 | if (bcbuf_acks(skb) == 0) { | 241 | if (bcbuf_acks(skb) == 0) { | 
| 267 | __skb_unlink(skb, &tn->bcl->outqueue); | 242 | __skb_unlink(skb, &tn->bcl->transmq); | 
| 268 | kfree_skb(skb); | 243 | kfree_skb(skb); | 
| 269 | released = 1; | 244 | released = 1; | 
| 270 | } | 245 | } | 
| @@ -272,7 +247,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | |||
| 272 | n_ptr->bclink.acked = acked; | 247 | n_ptr->bclink.acked = acked; | 
| 273 | 248 | ||
| 274 | /* Try resolving broadcast link congestion, if necessary */ | 249 | /* Try resolving broadcast link congestion, if necessary */ | 
| 275 | if (unlikely(tn->bcl->next_out)) { | 250 | if (unlikely(skb_peek(&tn->bcl->backlogq))) { | 
| 276 | tipc_link_push_packets(tn->bcl); | 251 | tipc_link_push_packets(tn->bcl); | 
| 277 | bclink_set_last_sent(net); | 252 | bclink_set_last_sent(net); | 
| 278 | } | 253 | } | 
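The hunks above replace the broadcast link's single outqueue, where a next_out pointer marked the first not-yet-sent buffer, with two queues: transmq for packets already sent and awaiting acknowledgment, and backlogq for packets still waiting for send-window space. A standalone sketch of the resulting acknowledge-then-push cycle, simplified to one peer and plain integers instead of sk_buffs (all names and the window size are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define WINDOW 4                        /* illustrative send window */

    static int transmq[64], backlogq[64];   /* sent/unacked vs. not yet sent */
    static int tx_len, bl_len;

    static void push_backlog(void)          /* cf. tipc_link_push_packets() */
    {
            while (tx_len < WINDOW && bl_len > 0) {
                    transmq[tx_len++] = backlogq[0];
                    memmove(backlogq, backlogq + 1, --bl_len * sizeof(int));
            }
    }

    static void acknowledge(int acked)      /* cf. tipc_bclink_acknowledge() */
    {
            int n = 0;

            while (n < tx_len && transmq[n] <= acked)
                    n++;                    /* release what the peer has seen */
            memmove(transmq, transmq + n, (tx_len - n) * sizeof(int));
            tx_len -= n;
            push_backlog();                 /* acks may have opened the window */
    }

    int main(void)
    {
            int seq;

            for (seq = 1; seq <= 10; seq++) /* queue ten packets */
                    backlogq[bl_len++] = seq;
            push_backlog();
            acknowledge(3);
            printf("in flight %d, backlog %d\n", tx_len, bl_len);
            return 0;
    }

In the real broadcast code a packet leaves transmq only once every node in the cluster has acknowledged it (bcbuf_acks() reaching zero); the sketch collapses that to a single acknowledging peer.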
| @@ -319,7 +294,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, | |||
| 319 | buf = tipc_buf_acquire(INT_H_SIZE); | 294 | buf = tipc_buf_acquire(INT_H_SIZE); | 
| 320 | if (buf) { | 295 | if (buf) { | 
| 321 | struct tipc_msg *msg = buf_msg(buf); | 296 | struct tipc_msg *msg = buf_msg(buf); | 
| 322 | struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); | 297 | struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq); | 
| 323 | u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; | 298 | u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; | 
| 324 | 299 | ||
| 325 | tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG, | 300 | tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG, | 
| @@ -354,13 +329,12 @@ static void bclink_peek_nack(struct net *net, struct tipc_msg *msg) | |||
| 354 | return; | 329 | return; | 
| 355 | 330 | ||
| 356 | tipc_node_lock(n_ptr); | 331 | tipc_node_lock(n_ptr); | 
| 357 | |||
| 358 | if (n_ptr->bclink.recv_permitted && | 332 | if (n_ptr->bclink.recv_permitted && | 
| 359 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && | 333 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && | 
| 360 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) | 334 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) | 
| 361 | n_ptr->bclink.oos_state = 2; | 335 | n_ptr->bclink.oos_state = 2; | 
| 362 | |||
| 363 | tipc_node_unlock(n_ptr); | 336 | tipc_node_unlock(n_ptr); | 
| 337 | tipc_node_put(n_ptr); | ||
| 364 | } | 338 | } | 
| 365 | 339 | ||
| 366 | /* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster | 340 | /* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster | 
| @@ -387,14 +361,13 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list) | |||
| 387 | __skb_queue_purge(list); | 361 | __skb_queue_purge(list); | 
| 388 | return -EHOSTUNREACH; | 362 | return -EHOSTUNREACH; | 
| 389 | } | 363 | } | 
| 390 | |||
| 391 | /* Broadcast to all nodes */ | 364 | /* Broadcast to all nodes */ | 
| 392 | if (likely(bclink)) { | 365 | if (likely(bclink)) { | 
| 393 | tipc_bclink_lock(net); | 366 | tipc_bclink_lock(net); | 
| 394 | if (likely(bclink->bcast_nodes.count)) { | 367 | if (likely(bclink->bcast_nodes.count)) { | 
| 395 | rc = __tipc_link_xmit(net, bcl, list); | 368 | rc = __tipc_link_xmit(net, bcl, list); | 
| 396 | if (likely(!rc)) { | 369 | if (likely(!rc)) { | 
| 397 | u32 len = skb_queue_len(&bcl->outqueue); | 370 | u32 len = skb_queue_len(&bcl->transmq); | 
| 398 | 371 | ||
| 399 | bclink_set_last_sent(net); | 372 | bclink_set_last_sent(net); | 
| 400 | bcl->stats.queue_sz_counts++; | 373 | bcl->stats.queue_sz_counts++; | 
| @@ -440,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) | |||
| 440 | */ | 413 | */ | 
| 441 | if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { | 414 | if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { | 
| 442 | tipc_link_proto_xmit(node->active_links[node->addr & 1], | 415 | tipc_link_proto_xmit(node->active_links[node->addr & 1], | 
| 443 | STATE_MSG, 0, 0, 0, 0, 0); | 416 | STATE_MSG, 0, 0, 0, 0); | 
| 444 | tn->bcl->stats.sent_acks++; | 417 | tn->bcl->stats.sent_acks++; | 
| 445 | } | 418 | } | 
| 446 | } | 419 | } | 
| @@ -481,17 +454,18 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf) | |||
| 481 | goto unlock; | 454 | goto unlock; | 
| 482 | if (msg_destnode(msg) == tn->own_addr) { | 455 | if (msg_destnode(msg) == tn->own_addr) { | 
| 483 | tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); | 456 | tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); | 
| 484 | tipc_node_unlock(node); | ||
| 485 | tipc_bclink_lock(net); | 457 | tipc_bclink_lock(net); | 
| 486 | bcl->stats.recv_nacks++; | 458 | bcl->stats.recv_nacks++; | 
| 487 | tn->bclink->retransmit_to = node; | 459 | tn->bclink->retransmit_to = node; | 
| 488 | bclink_retransmit_pkt(tn, msg_bcgap_after(msg), | 460 | bclink_retransmit_pkt(tn, msg_bcgap_after(msg), | 
| 489 | msg_bcgap_to(msg)); | 461 | msg_bcgap_to(msg)); | 
| 490 | tipc_bclink_unlock(net); | 462 | tipc_bclink_unlock(net); | 
| 463 | tipc_node_unlock(node); | ||
| 491 | } else { | 464 | } else { | 
| 492 | tipc_node_unlock(node); | 465 | tipc_node_unlock(node); | 
| 493 | bclink_peek_nack(net, msg); | 466 | bclink_peek_nack(net, msg); | 
| 494 | } | 467 | } | 
| 468 | tipc_node_put(node); | ||
| 495 | goto exit; | 469 | goto exit; | 
| 496 | } | 470 | } | 
| 497 | 471 | ||
| @@ -528,11 +502,13 @@ receive: | |||
| 528 | tipc_bclink_unlock(net); | 502 | tipc_bclink_unlock(net); | 
| 529 | tipc_node_unlock(node); | 503 | tipc_node_unlock(node); | 
| 530 | } else if (msg_user(msg) == MSG_FRAGMENTER) { | 504 | } else if (msg_user(msg) == MSG_FRAGMENTER) { | 
| 531 | tipc_buf_append(&node->bclink.reasm_buf, &buf); | ||
| 532 | if (unlikely(!buf && !node->bclink.reasm_buf)) | ||
| 533 | goto unlock; | ||
| 534 | tipc_bclink_lock(net); | 505 | tipc_bclink_lock(net); | 
| 535 | bclink_accept_pkt(node, seqno); | 506 | bclink_accept_pkt(node, seqno); | 
| 507 | tipc_buf_append(&node->bclink.reasm_buf, &buf); | ||
| 508 | if (unlikely(!buf && !node->bclink.reasm_buf)) { | ||
| 509 | tipc_bclink_unlock(net); | ||
| 510 | goto unlock; | ||
| 511 | } | ||
| 536 | bcl->stats.recv_fragments++; | 512 | bcl->stats.recv_fragments++; | 
| 537 | if (buf) { | 513 | if (buf) { | 
| 538 | bcl->stats.recv_fragmented++; | 514 | bcl->stats.recv_fragmented++; | 
| @@ -559,25 +535,25 @@ receive: | |||
| 559 | if (node->bclink.last_in == node->bclink.last_sent) | 535 | if (node->bclink.last_in == node->bclink.last_sent) | 
| 560 | goto unlock; | 536 | goto unlock; | 
| 561 | 537 | ||
| 562 | if (skb_queue_empty(&node->bclink.deferred_queue)) { | 538 | if (skb_queue_empty(&node->bclink.deferdq)) { | 
| 563 | node->bclink.oos_state = 1; | 539 | node->bclink.oos_state = 1; | 
| 564 | goto unlock; | 540 | goto unlock; | 
| 565 | } | 541 | } | 
| 566 | 542 | ||
| 567 | msg = buf_msg(skb_peek(&node->bclink.deferred_queue)); | 543 | msg = buf_msg(skb_peek(&node->bclink.deferdq)); | 
| 568 | seqno = msg_seqno(msg); | 544 | seqno = msg_seqno(msg); | 
| 569 | next_in = mod(next_in + 1); | 545 | next_in = mod(next_in + 1); | 
| 570 | if (seqno != next_in) | 546 | if (seqno != next_in) | 
| 571 | goto unlock; | 547 | goto unlock; | 
| 572 | 548 | ||
| 573 | /* Take in-sequence message from deferred queue & deliver it */ | 549 | /* Take in-sequence message from deferred queue & deliver it */ | 
| 574 | buf = __skb_dequeue(&node->bclink.deferred_queue); | 550 | buf = __skb_dequeue(&node->bclink.deferdq); | 
| 575 | goto receive; | 551 | goto receive; | 
| 576 | } | 552 | } | 
| 577 | 553 | ||
| 578 | /* Handle out-of-sequence broadcast message */ | 554 | /* Handle out-of-sequence broadcast message */ | 
| 579 | if (less(next_in, seqno)) { | 555 | if (less(next_in, seqno)) { | 
| 580 | deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue, | 556 | deferred = tipc_link_defer_pkt(&node->bclink.deferdq, | 
| 581 | buf); | 557 | buf); | 
| 582 | bclink_update_last_sent(node, seqno); | 558 | bclink_update_last_sent(node, seqno); | 
| 583 | buf = NULL; | 559 | buf = NULL; | 
| @@ -594,6 +570,7 @@ receive: | |||
| 594 | 570 | ||
| 595 | unlock: | 571 | unlock: | 
| 596 | tipc_node_unlock(node); | 572 | tipc_node_unlock(node); | 
| 573 | tipc_node_put(node); | ||
| 597 | exit: | 574 | exit: | 
| 598 | kfree_skb(buf); | 575 | kfree_skb(buf); | 
| 599 | } | 576 | } | 
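Several hunks in this file, and in discover.c further down, add tipc_node_put() on paths that looked up a node, reflecting that node lookup now returns a counted reference. The discipline the added calls follow, sketched with an illustrative caller:

    /* Illustrative only, not part of the patch */
    static void example_use_node(struct net *net, u32 addr)
    {
            struct tipc_node *n = tipc_node_find(net, addr);

            if (!n)
                    return;
            tipc_node_lock(n);
            /* ... use the node ... */
            tipc_node_unlock(n);
            tipc_node_put(n);       /* drop the reference taken by the find */
    }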
| @@ -634,7 +611,6 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf, | |||
| 634 | msg_set_non_seq(msg, 1); | 611 | msg_set_non_seq(msg, 1); | 
| 635 | msg_set_mc_netid(msg, tn->net_id); | 612 | msg_set_mc_netid(msg, tn->net_id); | 
| 636 | tn->bcl->stats.sent_info++; | 613 | tn->bcl->stats.sent_info++; | 
| 637 | |||
| 638 | if (WARN_ON(!bclink->bcast_nodes.count)) { | 614 | if (WARN_ON(!bclink->bcast_nodes.count)) { | 
| 639 | dump_stack(); | 615 | dump_stack(); | 
| 640 | return 0; | 616 | return 0; | 
| @@ -835,7 +811,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) | |||
| 835 | prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); | 811 | prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); | 
| 836 | if (!prop) | 812 | if (!prop) | 
| 837 | goto attr_msg_full; | 813 | goto attr_msg_full; | 
| 838 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0])) | 814 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window)) | 
| 839 | goto prop_msg_full; | 815 | goto prop_msg_full; | 
| 840 | nla_nest_end(msg->skb, prop); | 816 | nla_nest_end(msg->skb, prop); | 
| 841 | 817 | ||
| @@ -913,8 +889,9 @@ int tipc_bclink_init(struct net *net) | |||
| 913 | sprintf(bcbearer->media.name, "tipc-broadcast"); | 889 | sprintf(bcbearer->media.name, "tipc-broadcast"); | 
| 914 | 890 | ||
| 915 | spin_lock_init(&bclink->lock); | 891 | spin_lock_init(&bclink->lock); | 
| 916 | __skb_queue_head_init(&bcl->outqueue); | 892 | __skb_queue_head_init(&bcl->transmq); | 
| 917 | __skb_queue_head_init(&bcl->deferred_queue); | 893 | __skb_queue_head_init(&bcl->backlogq); | 
| 894 | __skb_queue_head_init(&bcl->deferdq); | ||
| 918 | skb_queue_head_init(&bcl->wakeupq); | 895 | skb_queue_head_init(&bcl->wakeupq); | 
| 919 | bcl->next_out_no = 1; | 896 | bcl->next_out_no = 1; | 
| 920 | spin_lock_init(&bclink->node.lock); | 897 | spin_lock_init(&bclink->node.lock); | 
| @@ -922,7 +899,7 @@ int tipc_bclink_init(struct net *net) | |||
| 922 | skb_queue_head_init(&bclink->inputq); | 899 | skb_queue_head_init(&bclink->inputq); | 
| 923 | bcl->owner = &bclink->node; | 900 | bcl->owner = &bclink->node; | 
| 924 | bcl->owner->net = net; | 901 | bcl->owner->net = net; | 
| 925 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 902 | bcl->mtu = MAX_PKT_DEFAULT_MCAST; | 
| 926 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 903 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 
| 927 | bcl->bearer_id = MAX_BEARERS; | 904 | bcl->bearer_id = MAX_BEARERS; | 
| 928 | rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); | 905 | rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); | 
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 43f397fbac55..4bdc12277d33 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
| @@ -55,7 +55,6 @@ struct tipc_bcbearer_pair { | |||
| 55 | struct tipc_bearer *secondary; | 55 | struct tipc_bearer *secondary; | 
| 56 | }; | 56 | }; | 
| 57 | 57 | ||
| 58 | #define TIPC_BCLINK_RESET 1 | ||
| 59 | #define BCBEARER MAX_BEARERS | 58 | #define BCBEARER MAX_BEARERS | 
| 60 | 59 | ||
| 61 | /** | 60 | /** | 
| @@ -86,7 +85,6 @@ struct tipc_bcbearer { | |||
| 86 | * @lock: spinlock governing access to structure | 85 | * @lock: spinlock governing access to structure | 
| 87 | * @link: (non-standard) broadcast link structure | 86 | * @link: (non-standard) broadcast link structure | 
| 88 | * @node: (non-standard) node structure representing b'cast link's peer node | 87 | * @node: (non-standard) node structure representing b'cast link's peer node | 
| 89 | * @flags: represent bclink states | ||
| 90 | * @bcast_nodes: map of broadcast-capable nodes | 88 | * @bcast_nodes: map of broadcast-capable nodes | 
| 91 | * @retransmit_to: node that most recently requested a retransmit | 89 | * @retransmit_to: node that most recently requested a retransmit | 
| 92 | * | 90 | * | 
| @@ -96,7 +94,6 @@ struct tipc_bclink { | |||
| 96 | spinlock_t lock; | 94 | spinlock_t lock; | 
| 97 | struct tipc_link link; | 95 | struct tipc_link link; | 
| 98 | struct tipc_node node; | 96 | struct tipc_node node; | 
| 99 | unsigned int flags; | ||
| 100 | struct sk_buff_head arrvq; | 97 | struct sk_buff_head arrvq; | 
| 101 | struct sk_buff_head inputq; | 98 | struct sk_buff_head inputq; | 
| 102 | struct tipc_node_map bcast_nodes; | 99 | struct tipc_node_map bcast_nodes; | 
| @@ -117,7 +114,6 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, | |||
| 117 | 114 | ||
| 118 | int tipc_bclink_init(struct net *net); | 115 | int tipc_bclink_init(struct net *net); | 
| 119 | void tipc_bclink_stop(struct net *net); | 116 | void tipc_bclink_stop(struct net *net); | 
| 120 | void tipc_bclink_set_flags(struct net *tn, unsigned int flags); | ||
| 121 | void tipc_bclink_add_node(struct net *net, u32 addr); | 117 | void tipc_bclink_add_node(struct net *net, u32 addr); | 
| 122 | void tipc_bclink_remove_node(struct net *net, u32 addr); | 118 | void tipc_bclink_remove_node(struct net *net, u32 addr); | 
| 123 | struct tipc_node *tipc_bclink_retransmit_to(struct net *tn); | 119 | struct tipc_node *tipc_bclink_retransmit_to(struct net *tn); | 
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 48852c2dcc03..70e3dacbf84a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
| @@ -48,6 +48,9 @@ static struct tipc_media * const media_info_array[] = { | |||
| 48 | #ifdef CONFIG_TIPC_MEDIA_IB | 48 | #ifdef CONFIG_TIPC_MEDIA_IB | 
| 49 | &ib_media_info, | 49 | &ib_media_info, | 
| 50 | #endif | 50 | #endif | 
| 51 | #ifdef CONFIG_TIPC_MEDIA_UDP | ||
| 52 | &udp_media_info, | ||
| 53 | #endif | ||
| 51 | NULL | 54 | NULL | 
| 52 | }; | 55 | }; | 
| 53 | 56 | ||
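With CONFIG_TIPC_MEDIA_UDP enabled, udp_media_info joins the NULL-terminated media table above alongside the Ethernet and InfiniBand entries. A sketch of how such a table is typically scanned when resolving a media type id (the lookup function here is illustrative, not the one in bearer.c):

    static struct tipc_media *example_find_media(u8 id)
    {
            int i;

            for (i = 0; media_info_array[i]; i++)
                    if (media_info_array[i]->type_id == id)
                            return media_info_array[i];
            return NULL;
    }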
| @@ -216,7 +219,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) | |||
| 216 | * tipc_enable_bearer - enable bearer with the given name | 219 | * tipc_enable_bearer - enable bearer with the given name | 
| 217 | */ | 220 | */ | 
| 218 | static int tipc_enable_bearer(struct net *net, const char *name, | 221 | static int tipc_enable_bearer(struct net *net, const char *name, | 
| 219 | u32 disc_domain, u32 priority) | 222 | u32 disc_domain, u32 priority, | 
| 223 | struct nlattr *attr[]) | ||
| 220 | { | 224 | { | 
| 221 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 225 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 222 | struct tipc_bearer *b_ptr; | 226 | struct tipc_bearer *b_ptr; | 
| @@ -304,7 +308,7 @@ restart: | |||
| 304 | 308 | ||
| 305 | strcpy(b_ptr->name, name); | 309 | strcpy(b_ptr->name, name); | 
| 306 | b_ptr->media = m_ptr; | 310 | b_ptr->media = m_ptr; | 
| 307 | res = m_ptr->enable_media(net, b_ptr); | 311 | res = m_ptr->enable_media(net, b_ptr, attr); | 
| 308 | if (res) { | 312 | if (res) { | 
| 309 | pr_warn("Bearer <%s> rejected, enable failure (%d)\n", | 313 | pr_warn("Bearer <%s> rejected, enable failure (%d)\n", | 
| 310 | name, -res); | 314 | name, -res); | 
| @@ -372,7 +376,8 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, | |||
| 372 | kfree_rcu(b_ptr, rcu); | 376 | kfree_rcu(b_ptr, rcu); | 
| 373 | } | 377 | } | 
| 374 | 378 | ||
| 375 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b) | 379 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, | 
| 380 | struct nlattr *attr[]) | ||
| 376 | { | 381 | { | 
| 377 | struct net_device *dev; | 382 | struct net_device *dev; | 
| 378 | char *driver_name = strchr((const char *)b->name, ':') + 1; | 383 | char *driver_name = strchr((const char *)b->name, ':') + 1; | 
| @@ -586,14 +591,14 @@ void tipc_bearer_stop(struct net *net) | |||
| 586 | 591 | ||
| 587 | /* Caller should hold rtnl_lock to protect the bearer */ | 592 | /* Caller should hold rtnl_lock to protect the bearer */ | 
| 588 | static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, | 593 | static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, | 
| 589 | struct tipc_bearer *bearer) | 594 | struct tipc_bearer *bearer, int nlflags) | 
| 590 | { | 595 | { | 
| 591 | void *hdr; | 596 | void *hdr; | 
| 592 | struct nlattr *attrs; | 597 | struct nlattr *attrs; | 
| 593 | struct nlattr *prop; | 598 | struct nlattr *prop; | 
| 594 | 599 | ||
| 595 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 600 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 
| 596 | NLM_F_MULTI, TIPC_NL_BEARER_GET); | 601 | nlflags, TIPC_NL_BEARER_GET); | 
| 597 | if (!hdr) | 602 | if (!hdr) | 
| 598 | return -EMSGSIZE; | 603 | return -EMSGSIZE; | 
| 599 | 604 | ||
| @@ -652,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 652 | if (!bearer) | 657 | if (!bearer) | 
| 653 | continue; | 658 | continue; | 
| 654 | 659 | ||
| 655 | err = __tipc_nl_add_bearer(&msg, bearer); | 660 | err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI); | 
| 656 | if (err) | 661 | if (err) | 
| 657 | break; | 662 | break; | 
| 658 | } | 663 | } | 
| @@ -700,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) | |||
| 700 | goto err_out; | 705 | goto err_out; | 
| 701 | } | 706 | } | 
| 702 | 707 | ||
| 703 | err = __tipc_nl_add_bearer(&msg, bearer); | 708 | err = __tipc_nl_add_bearer(&msg, bearer, 0); | 
| 704 | if (err) | 709 | if (err) | 
| 705 | goto err_out; | 710 | goto err_out; | 
| 706 | rtnl_unlock(); | 711 | rtnl_unlock(); | 
| @@ -791,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | |||
| 791 | } | 796 | } | 
| 792 | 797 | ||
| 793 | rtnl_lock(); | 798 | rtnl_lock(); | 
| 794 | err = tipc_enable_bearer(net, bearer, domain, prio); | 799 | err = tipc_enable_bearer(net, bearer, domain, prio, attrs); | 
| 795 | if (err) { | 800 | if (err) { | 
| 796 | rtnl_unlock(); | 801 | rtnl_unlock(); | 
| 797 | return err; | 802 | return err; | 
| @@ -852,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
| 852 | } | 857 | } | 
| 853 | 858 | ||
| 854 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | 859 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | 
| 855 | struct tipc_media *media) | 860 | struct tipc_media *media, int nlflags) | 
| 856 | { | 861 | { | 
| 857 | void *hdr; | 862 | void *hdr; | 
| 858 | struct nlattr *attrs; | 863 | struct nlattr *attrs; | 
| 859 | struct nlattr *prop; | 864 | struct nlattr *prop; | 
| 860 | 865 | ||
| 861 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 866 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 
| 862 | NLM_F_MULTI, TIPC_NL_MEDIA_GET); | 867 | nlflags, TIPC_NL_MEDIA_GET); | 
| 863 | if (!hdr) | 868 | if (!hdr) | 
| 864 | return -EMSGSIZE; | 869 | return -EMSGSIZE; | 
| 865 | 870 | ||
| @@ -911,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 911 | 916 | ||
| 912 | rtnl_lock(); | 917 | rtnl_lock(); | 
| 913 | for (; media_info_array[i] != NULL; i++) { | 918 | for (; media_info_array[i] != NULL; i++) { | 
| 914 | err = __tipc_nl_add_media(&msg, media_info_array[i]); | 919 | err = __tipc_nl_add_media(&msg, media_info_array[i], | 
| 920 | NLM_F_MULTI); | ||
| 915 | if (err) | 921 | if (err) | 
| 916 | break; | 922 | break; | 
| 917 | } | 923 | } | 
| @@ -958,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) | |||
| 958 | goto err_out; | 964 | goto err_out; | 
| 959 | } | 965 | } | 
| 960 | 966 | ||
| 961 | err = __tipc_nl_add_media(&msg, media); | 967 | err = __tipc_nl_add_media(&msg, media, 0); | 
| 962 | if (err) | 968 | if (err) | 
| 963 | goto err_out; | 969 | goto err_out; | 
| 964 | rtnl_unlock(); | 970 | rtnl_unlock(); | 
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 6b17795ff8bc..5cad243ee8fc 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
| @@ -41,7 +41,7 @@ | |||
| 41 | #include <net/genetlink.h> | 41 | #include <net/genetlink.h> | 
| 42 | 42 | ||
| 43 | #define MAX_BEARERS 2 | 43 | #define MAX_BEARERS 2 | 
| 44 | #define MAX_MEDIA 2 | 44 | #define MAX_MEDIA 3 | 
| 45 | #define MAX_NODES 4096 | 45 | #define MAX_NODES 4096 | 
| 46 | #define WSIZE 32 | 46 | #define WSIZE 32 | 
| 47 | 47 | ||
| @@ -50,14 +50,16 @@ | |||
| 50 | * - the field's actual content and length is defined per media | 50 | * - the field's actual content and length is defined per media | 
| 51 | * - remaining unused bytes in the field are set to zero | 51 | * - remaining unused bytes in the field are set to zero | 
| 52 | */ | 52 | */ | 
| 53 | #define TIPC_MEDIA_ADDR_SIZE 32 | 53 | #define TIPC_MEDIA_INFO_SIZE 32 | 
| 54 | #define TIPC_MEDIA_TYPE_OFFSET 3 | 54 | #define TIPC_MEDIA_TYPE_OFFSET 3 | 
| 55 | #define TIPC_MEDIA_ADDR_OFFSET 4 | ||
| 55 | 56 | ||
| 56 | /* | 57 | /* | 
| 57 | * Identifiers of supported TIPC media types | 58 | * Identifiers of supported TIPC media types | 
| 58 | */ | 59 | */ | 
| 59 | #define TIPC_MEDIA_TYPE_ETH 1 | 60 | #define TIPC_MEDIA_TYPE_ETH 1 | 
| 60 | #define TIPC_MEDIA_TYPE_IB 2 | 61 | #define TIPC_MEDIA_TYPE_IB 2 | 
| 62 | #define TIPC_MEDIA_TYPE_UDP 3 | ||
| 61 | 63 | ||
| 62 | /** | 64 | /** | 
| 63 | * struct tipc_node_map - set of node identifiers | 65 | * struct tipc_node_map - set of node identifiers | 
| @@ -76,7 +78,7 @@ struct tipc_node_map { | |||
| 76 | * @broadcast: non-zero if address is a broadcast address | 78 | * @broadcast: non-zero if address is a broadcast address | 
| 77 | */ | 79 | */ | 
| 78 | struct tipc_media_addr { | 80 | struct tipc_media_addr { | 
| 79 | u8 value[TIPC_MEDIA_ADDR_SIZE]; | 81 | u8 value[TIPC_MEDIA_INFO_SIZE]; | 
| 80 | u8 media_id; | 82 | u8 media_id; | 
| 81 | u8 broadcast; | 83 | u8 broadcast; | 
| 82 | }; | 84 | }; | 
| @@ -103,7 +105,8 @@ struct tipc_media { | |||
| 103 | int (*send_msg)(struct net *net, struct sk_buff *buf, | 105 | int (*send_msg)(struct net *net, struct sk_buff *buf, | 
| 104 | struct tipc_bearer *b_ptr, | 106 | struct tipc_bearer *b_ptr, | 
| 105 | struct tipc_media_addr *dest); | 107 | struct tipc_media_addr *dest); | 
| 106 | int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr); | 108 | int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr, | 
| 109 | struct nlattr *attr[]); | ||
| 107 | void (*disable_media)(struct tipc_bearer *b_ptr); | 110 | void (*disable_media)(struct tipc_bearer *b_ptr); | 
| 108 | int (*addr2str)(struct tipc_media_addr *addr, | 111 | int (*addr2str)(struct tipc_media_addr *addr, | 
| 109 | char *strbuf, | 112 | char *strbuf, | 
| @@ -182,6 +185,9 @@ extern struct tipc_media eth_media_info; | |||
| 182 | #ifdef CONFIG_TIPC_MEDIA_IB | 185 | #ifdef CONFIG_TIPC_MEDIA_IB | 
| 183 | extern struct tipc_media ib_media_info; | 186 | extern struct tipc_media ib_media_info; | 
| 184 | #endif | 187 | #endif | 
| 188 | #ifdef CONFIG_TIPC_MEDIA_UDP | ||
| 189 | extern struct tipc_media udp_media_info; | ||
| 190 | #endif | ||
| 185 | 191 | ||
| 186 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | 192 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | 
| 187 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | 193 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | 
| @@ -196,7 +202,8 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | |||
| 196 | int tipc_media_set_priority(const char *name, u32 new_value); | 202 | int tipc_media_set_priority(const char *name, u32 new_value); | 
| 197 | int tipc_media_set_window(const char *name, u32 new_value); | 203 | int tipc_media_set_window(const char *name, u32 new_value); | 
| 198 | void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); | 204 | void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); | 
| 199 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b); | 205 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, | 
| 206 | struct nlattr *attrs[]); | ||
| 200 | void tipc_disable_l2_media(struct tipc_bearer *b); | 207 | void tipc_disable_l2_media(struct tipc_bearer *b); | 
| 201 | int tipc_l2_send_msg(struct net *net, struct sk_buff *buf, | 208 | int tipc_l2_send_msg(struct net *net, struct sk_buff *buf, | 
| 202 | struct tipc_bearer *b, struct tipc_media_addr *dest); | 209 | struct tipc_bearer *b, struct tipc_media_addr *dest); | 
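The enable_media callback in struct tipc_media now receives the parsed netlink attributes of the bearer-enable request, which is what allows the UDP media added by this series to pick up its address configuration at enable time. A stub showing only the shape of the new callback (the function name and body are illustrative):

    static int tipc_example_enable_media(struct net *net, struct tipc_bearer *b,
                                         struct nlattr *attr[])
    {
            /* media-specific options arrive via attr[]; initialize the
             * bearer's media-private state here and return 0 on success */
            return 0;
    }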
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 935205e6bcfe..be1c9fa60b09 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
| @@ -152,11 +152,11 @@ out_netlink: | |||
| 152 | static void __exit tipc_exit(void) | 152 | static void __exit tipc_exit(void) | 
| 153 | { | 153 | { | 
| 154 | tipc_bearer_cleanup(); | 154 | tipc_bearer_cleanup(); | 
| 155 | unregister_pernet_subsys(&tipc_net_ops); | ||
| 155 | tipc_netlink_stop(); | 156 | tipc_netlink_stop(); | 
| 156 | tipc_netlink_compat_stop(); | 157 | tipc_netlink_compat_stop(); | 
| 157 | tipc_socket_stop(); | 158 | tipc_socket_stop(); | 
| 158 | tipc_unregister_sysctl(); | 159 | tipc_unregister_sysctl(); | 
| 159 | unregister_pernet_subsys(&tipc_net_ops); | ||
| 160 | 160 | ||
| 161 | pr_info("Deactivated\n"); | 161 | pr_info("Deactivated\n"); | 
| 162 | } | 162 | } | 
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index feef3753615d..967e292f53c8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
| @@ -86,9 +86,10 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type, | |||
| 86 | 86 | ||
| 87 | msg = buf_msg(buf); | 87 | msg = buf_msg(buf); | 
| 88 | tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type, | 88 | tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type, | 
| 89 | INT_H_SIZE, dest_domain); | 89 | MAX_H_SIZE, dest_domain); | 
| 90 | msg_set_non_seq(msg, 1); | 90 | msg_set_non_seq(msg, 1); | 
| 91 | msg_set_node_sig(msg, tn->random); | 91 | msg_set_node_sig(msg, tn->random); | 
| 92 | msg_set_node_capabilities(msg, 0); | ||
| 92 | msg_set_dest_domain(msg, dest_domain); | 93 | msg_set_dest_domain(msg, dest_domain); | 
| 93 | msg_set_bc_netid(msg, tn->net_id); | 94 | msg_set_bc_netid(msg, tn->net_id); | 
| 94 | b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); | 95 | b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); | 
| @@ -133,6 +134,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, | |||
| 133 | u32 net_id = msg_bc_netid(msg); | 134 | u32 net_id = msg_bc_netid(msg); | 
| 134 | u32 mtyp = msg_type(msg); | 135 | u32 mtyp = msg_type(msg); | 
| 135 | u32 signature = msg_node_sig(msg); | 136 | u32 signature = msg_node_sig(msg); | 
| 137 | u16 caps = msg_node_capabilities(msg); | ||
| 136 | bool addr_match = false; | 138 | bool addr_match = false; | 
| 137 | bool sign_match = false; | 139 | bool sign_match = false; | 
| 138 | bool link_up = false; | 140 | bool link_up = false; | 
| @@ -167,6 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, | |||
| 167 | if (!node) | 169 | if (!node) | 
| 168 | return; | 170 | return; | 
| 169 | tipc_node_lock(node); | 171 | tipc_node_lock(node); | 
| 172 | node->capabilities = caps; | ||
| 170 | link = node->links[bearer->identity]; | 173 | link = node->links[bearer->identity]; | 
| 171 | 174 | ||
| 172 | /* Prepare to validate requesting node's signature and media address */ | 175 | /* Prepare to validate requesting node's signature and media address */ | 
| @@ -249,7 +252,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, | |||
| 249 | 252 | ||
| 250 | /* Send response, if necessary */ | 253 | /* Send response, if necessary */ | 
| 251 | if (respond && (mtyp == DSC_REQ_MSG)) { | 254 | if (respond && (mtyp == DSC_REQ_MSG)) { | 
| 252 | rbuf = tipc_buf_acquire(INT_H_SIZE); | 255 | rbuf = tipc_buf_acquire(MAX_H_SIZE); | 
| 253 | if (rbuf) { | 256 | if (rbuf) { | 
| 254 | tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer); | 257 | tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer); | 
| 255 | tipc_bearer_send(net, bearer->identity, rbuf, &maddr); | 258 | tipc_bearer_send(net, bearer->identity, rbuf, &maddr); | 
| @@ -257,6 +260,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, | |||
| 257 | } | 260 | } | 
| 258 | } | 261 | } | 
| 259 | tipc_node_unlock(node); | 262 | tipc_node_unlock(node); | 
| 263 | tipc_node_put(node); | ||
| 260 | } | 264 | } | 
| 261 | 265 | ||
| 262 | /** | 266 | /** | 
| @@ -359,8 +363,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, | |||
| 359 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | 363 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | 
| 360 | if (!req) | 364 | if (!req) | 
| 361 | return -ENOMEM; | 365 | return -ENOMEM; | 
| 362 | 366 | req->buf = tipc_buf_acquire(MAX_H_SIZE); | |
| 363 | req->buf = tipc_buf_acquire(INT_H_SIZE); | ||
| 364 | if (!req->buf) { | 367 | if (!req->buf) { | 
| 365 | kfree(req); | 368 | kfree(req); | 
| 366 | return -ENOMEM; | 369 | return -ENOMEM; | 
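Discovery messages are now allocated with the full MAX_H_SIZE header and carry a node capabilities field, which the receiver stores in node->capabilities. The field is a bitmap, so later features can be gated on what a peer advertised; a hypothetical check (the flag is made up for illustration):

    #define TIPC_EXAMPLE_CAP        (1 << 0)        /* hypothetical bit */

    static bool peer_supports_example(struct tipc_node *n)
    {
            return n->capabilities & TIPC_EXAMPLE_CAP;
    }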
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 5e1426f1751f..f69a2fde9f4a 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
| @@ -37,8 +37,6 @@ | |||
| 37 | #include "core.h" | 37 | #include "core.h" | 
| 38 | #include "bearer.h" | 38 | #include "bearer.h" | 
| 39 | 39 | ||
| 40 | #define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */ | ||
| 41 | |||
| 42 | /* Convert Ethernet address (media address format) to string */ | 40 | /* Convert Ethernet address (media address format) to string */ | 
| 43 | static int tipc_eth_addr2str(struct tipc_media_addr *addr, | 41 | static int tipc_eth_addr2str(struct tipc_media_addr *addr, | 
| 44 | char *strbuf, int bufsz) | 42 | char *strbuf, int bufsz) | 
| @@ -53,9 +51,9 @@ static int tipc_eth_addr2str(struct tipc_media_addr *addr, | |||
| 53 | /* Convert from media address format to discovery message addr format */ | 51 | /* Convert from media address format to discovery message addr format */ | 
| 54 | static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) | 52 | static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) | 
| 55 | { | 53 | { | 
| 56 | memset(msg, 0, TIPC_MEDIA_ADDR_SIZE); | 54 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); | 
| 57 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; | 55 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; | 
| 58 | memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN); | 56 | memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN); | 
| 59 | return 0; | 57 | return 0; | 
| 60 | } | 58 | } | 
| 61 | 59 | ||
| @@ -79,7 +77,7 @@ static int tipc_eth_msg2addr(struct tipc_bearer *b, | |||
| 79 | char *msg) | 77 | char *msg) | 
| 80 | { | 78 | { | 
| 81 | /* Skip past preamble: */ | 79 | /* Skip past preamble: */ | 
| 82 | msg += ETH_ADDR_OFFSET; | 80 | msg += TIPC_MEDIA_ADDR_OFFSET; | 
| 83 | return tipc_eth_raw2addr(b, addr, msg); | 81 | return tipc_eth_raw2addr(b, addr, msg); | 
| 84 | } | 82 | } | 
| 85 | 83 | ||
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 8522eef9c136..e8c16718e3fa 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
| @@ -57,7 +57,7 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, | |||
| 57 | /* Convert from media address format to discovery message addr format */ | 57 | /* Convert from media address format to discovery message addr format */ | 
| 58 | static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) | 58 | static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) | 
| 59 | { | 59 | { | 
| 60 | memset(msg, 0, TIPC_MEDIA_ADDR_SIZE); | 60 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); | 
| 61 | memcpy(msg, addr->value, INFINIBAND_ALEN); | 61 | memcpy(msg, addr->value, INFINIBAND_ALEN); | 
| 62 | return 0; | 62 | return 0; | 
| 63 | } | 63 | } | 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf364316de..43a515dc97b0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* | 
| 2 | * net/tipc/link.c: TIPC link code | 2 | * net/tipc/link.c: TIPC link code | 
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 1996-2007, 2012-2014, Ericsson AB | 4 | * Copyright (c) 1996-2007, 2012-2015, Ericsson AB | 
| 5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems | 5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems | 
| 6 | * All rights reserved. | 6 | * All rights reserved. | 
| 7 | * | 7 | * | 
| @@ -35,6 +35,7 @@ | |||
| 35 | */ | 35 | */ | 
| 36 | 36 | ||
| 37 | #include "core.h" | 37 | #include "core.h" | 
| 38 | #include "subscr.h" | ||
| 38 | #include "link.h" | 39 | #include "link.h" | 
| 39 | #include "bcast.h" | 40 | #include "bcast.h" | 
| 40 | #include "socket.h" | 41 | #include "socket.h" | 
| @@ -88,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { | |||
| 88 | #define TIMEOUT_EVT 560817u /* link timer expired */ | 89 | #define TIMEOUT_EVT 560817u /* link timer expired */ | 
| 89 | 90 | ||
| 90 | /* | 91 | /* | 
| 91 | * The following two 'message types' is really just implementation | 92 | * State value stored in 'failover_pkts' | 
| 92 | * data conveniently stored in the message header. | ||
| 93 | * They must not be considered part of the protocol | ||
| 94 | */ | 93 | */ | 
| 95 | #define OPEN_MSG 0 | 94 | #define FIRST_FAILOVER 0xffffu | 
| 96 | #define CLOSED_MSG 1 | ||
| 97 | |||
| 98 | /* | ||
| 99 | * State value stored in 'exp_msg_count' | ||
| 100 | */ | ||
| 101 | #define START_CHANGEOVER 100000u | ||
| 102 | 95 | ||
| 103 | static void link_handle_out_of_seq_msg(struct tipc_link *link, | 96 | static void link_handle_out_of_seq_msg(struct tipc_link *link, | 
| 104 | struct sk_buff *skb); | 97 | struct sk_buff *skb); | 
| 105 | static void tipc_link_proto_rcv(struct tipc_link *link, | 98 | static void tipc_link_proto_rcv(struct tipc_link *link, | 
| 106 | struct sk_buff *skb); | 99 | struct sk_buff *skb); | 
| 107 | static int tipc_link_tunnel_rcv(struct tipc_node *node, | ||
| 108 | struct sk_buff **skb); | ||
| 109 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); | 100 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); | 
| 110 | static void link_state_event(struct tipc_link *l_ptr, u32 event); | 101 | static void link_state_event(struct tipc_link *l_ptr, u32 event); | 
| 111 | static void link_reset_statistics(struct tipc_link *l_ptr); | 102 | static void link_reset_statistics(struct tipc_link *l_ptr); | 
| @@ -114,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l); | |||
| 114 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | 105 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | 
| 115 | static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); | 106 | static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); | 
| 116 | static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); | 107 | static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); | 
| 117 | 108 | static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb); | |
| 118 | /* | 109 | /* | 
| 119 | * Simple link routines | 110 | * Simple link routines | 
| 120 | */ | 111 | */ | 
| @@ -138,32 +129,11 @@ static void tipc_link_put(struct tipc_link *l_ptr) | |||
| 138 | kref_put(&l_ptr->ref, tipc_link_release); | 129 | kref_put(&l_ptr->ref, tipc_link_release); | 
| 139 | } | 130 | } | 
| 140 | 131 | ||
| 141 | static void link_init_max_pkt(struct tipc_link *l_ptr) | 132 | static struct tipc_link *tipc_parallel_link(struct tipc_link *l) | 
| 142 | { | 133 | { | 
| 143 | struct tipc_node *node = l_ptr->owner; | 134 | if (l->owner->active_links[0] != l) | 
| 144 | struct tipc_net *tn = net_generic(node->net, tipc_net_id); | 135 | return l->owner->active_links[0]; | 
| 145 | struct tipc_bearer *b_ptr; | 136 | return l->owner->active_links[1]; | 
| 146 | u32 max_pkt; | ||
| 147 | |||
| 148 | rcu_read_lock(); | ||
| 149 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]); | ||
| 150 | if (!b_ptr) { | ||
| 151 | rcu_read_unlock(); | ||
| 152 | return; | ||
| 153 | } | ||
| 154 | max_pkt = (b_ptr->mtu & ~3); | ||
| 155 | rcu_read_unlock(); | ||
| 156 | |||
| 157 | if (max_pkt > MAX_MSG_SIZE) | ||
| 158 | max_pkt = MAX_MSG_SIZE; | ||
| 159 | |||
| 160 | l_ptr->max_pkt_target = max_pkt; | ||
| 161 | if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) | ||
| 162 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
| 163 | else | ||
| 164 | l_ptr->max_pkt = MAX_PKT_DEFAULT; | ||
| 165 | |||
| 166 | l_ptr->max_pkt_probes = 0; | ||
| 167 | } | 137 | } | 
| 168 | 138 | ||
| 169 | /* | 139 | /* | 
| @@ -194,10 +164,10 @@ static void link_timeout(unsigned long data) | |||
| 194 | tipc_node_lock(l_ptr->owner); | 164 | tipc_node_lock(l_ptr->owner); | 
| 195 | 165 | ||
| 196 | /* update counters used in statistical profiling of send traffic */ | 166 | /* update counters used in statistical profiling of send traffic */ | 
| 197 | l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue); | 167 | l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq); | 
| 198 | l_ptr->stats.queue_sz_counts++; | 168 | l_ptr->stats.queue_sz_counts++; | 
| 199 | 169 | ||
| 200 | skb = skb_peek(&l_ptr->outqueue); | 170 | skb = skb_peek(&l_ptr->transmq); | 
| 201 | if (skb) { | 171 | if (skb) { | 
| 202 | struct tipc_msg *msg = buf_msg(skb); | 172 | struct tipc_msg *msg = buf_msg(skb); | 
| 203 | u32 length = msg_size(msg); | 173 | u32 length = msg_size(msg); | 
| @@ -229,7 +199,7 @@ static void link_timeout(unsigned long data) | |||
| 229 | /* do all other link processing performed on a periodic basis */ | 199 | /* do all other link processing performed on a periodic basis */ | 
| 230 | link_state_event(l_ptr, TIMEOUT_EVT); | 200 | link_state_event(l_ptr, TIMEOUT_EVT); | 
| 231 | 201 | ||
| 232 | if (l_ptr->next_out) | 202 | if (skb_queue_len(&l_ptr->backlogq)) | 
| 233 | tipc_link_push_packets(l_ptr); | 203 | tipc_link_push_packets(l_ptr); | 
| 234 | 204 | ||
| 235 | tipc_node_unlock(l_ptr->owner); | 205 | tipc_node_unlock(l_ptr->owner); | 
| @@ -305,16 +275,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
| 305 | msg_set_session(msg, (tn->random & 0xffff)); | 275 | msg_set_session(msg, (tn->random & 0xffff)); | 
| 306 | msg_set_bearer_id(msg, b_ptr->identity); | 276 | msg_set_bearer_id(msg, b_ptr->identity); | 
| 307 | strcpy((char *)msg_data(msg), if_name); | 277 | strcpy((char *)msg_data(msg), if_name); | 
| 308 | 278 | l_ptr->net_plane = b_ptr->net_plane; | |
| 279 | l_ptr->advertised_mtu = b_ptr->mtu; | ||
| 280 | l_ptr->mtu = l_ptr->advertised_mtu; | ||
| 309 | l_ptr->priority = b_ptr->priority; | 281 | l_ptr->priority = b_ptr->priority; | 
| 310 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); | 282 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); | 
| 311 | |||
| 312 | l_ptr->net_plane = b_ptr->net_plane; | ||
| 313 | link_init_max_pkt(l_ptr); | ||
| 314 | |||
| 315 | l_ptr->next_out_no = 1; | 283 | l_ptr->next_out_no = 1; | 
| 316 | __skb_queue_head_init(&l_ptr->outqueue); | 284 | __skb_queue_head_init(&l_ptr->transmq); | 
| 317 | __skb_queue_head_init(&l_ptr->deferred_queue); | 285 | __skb_queue_head_init(&l_ptr->backlogq); | 
| 286 | __skb_queue_head_init(&l_ptr->deferdq); | ||
| 318 | skb_queue_head_init(&l_ptr->wakeupq); | 287 | skb_queue_head_init(&l_ptr->wakeupq); | 
| 319 | skb_queue_head_init(&l_ptr->inputq); | 288 | skb_queue_head_init(&l_ptr->inputq); | 
| 320 | skb_queue_head_init(&l_ptr->namedq); | 289 | skb_queue_head_init(&l_ptr->namedq); | 
| @@ -327,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
| 327 | } | 296 | } | 
| 328 | 297 | ||
| 329 | /** | 298 | /** | 
| 330 | * link_delete - Conditional deletion of link. | 299 | * tipc_link_delete - Delete a link | 
| 331 | * If timer still running, real delete is done when it expires | 300 | * @l: link to be deleted | 
| 332 | * @link: link to be deleted | ||
| 333 | */ | 301 | */ | 
| 334 | void tipc_link_delete(struct tipc_link *link) | 302 | void tipc_link_delete(struct tipc_link *l) | 
| 335 | { | 303 | { | 
| 336 | tipc_link_reset_fragments(link); | 304 | tipc_link_reset(l); | 
| 337 | tipc_node_detach_link(link->owner, link); | 305 | if (del_timer(&l->timer)) | 
| 338 | tipc_link_put(link); | 306 | tipc_link_put(l); | 
| 307 | l->flags |= LINK_STOPPED; | ||
| 308 | /* Delete link now, or when timer is finished: */ | ||
| 309 | tipc_link_reset_fragments(l); | ||
| 310 | tipc_node_detach_link(l->owner, l); | ||
| 311 | tipc_link_put(l); | ||
| 339 | } | 312 | } | 
| 340 | 313 | ||
| 341 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | 314 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | 
| @@ -349,16 +322,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | |||
| 349 | list_for_each_entry_rcu(node, &tn->node_list, list) { | 322 | list_for_each_entry_rcu(node, &tn->node_list, list) { | 
| 350 | tipc_node_lock(node); | 323 | tipc_node_lock(node); | 
| 351 | link = node->links[bearer_id]; | 324 | link = node->links[bearer_id]; | 
| 352 | if (!link) { | 325 | if (link) | 
| 353 | tipc_node_unlock(node); | ||
| 354 | continue; | ||
| 355 | } | ||
| 356 | tipc_link_reset(link); | ||
| 357 | if (del_timer(&link->timer)) | ||
| 358 | tipc_link_put(link); | ||
| 359 | link->flags |= LINK_STOPPED; | ||
| 360 | /* Delete link now, or when failover is finished: */ | ||
| 361 | if (shutting_down || !tipc_node_is_up(node)) | ||
| 362 | tipc_link_delete(link); | 326 | tipc_link_delete(link); | 
| 363 | tipc_node_unlock(node); | 327 | tipc_node_unlock(node); | 
| 364 | } | 328 | } | 
| @@ -366,28 +330,43 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | |||
| 366 | } | 330 | } | 
| 367 | 331 | ||
| 368 | /** | 332 | /** | 
| 369 | * link_schedule_user - schedule user for wakeup after congestion | 333 | * link_schedule_user - schedule a message sender for wakeup after congestion | 
| 370 | * @link: congested link | 334 | * @link: congested link | 
| 371 | * @oport: sending port | 335 | * @list: message that was attempted sent | 
| 372 | * @chain_sz: size of buffer chain that was attempted sent | ||
| 373 | * @imp: importance of message attempted sent | ||
| 374 | * Create pseudo msg to send back to user when congestion abates | 336 | * Create pseudo msg to send back to user when congestion abates | 
| 337 | * Only consumes message if there is an error | ||
| 375 | */ | 338 | */ | 
| 376 | static bool link_schedule_user(struct tipc_link *link, u32 oport, | 339 | static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) | 
| 377 | uint chain_sz, uint imp) | ||
| 378 | { | 340 | { | 
| 379 | struct sk_buff *buf; | 341 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | 
| 342 | int imp = msg_importance(msg); | ||
| 343 | u32 oport = msg_origport(msg); | ||
| 344 | u32 addr = link_own_addr(link); | ||
| 345 | struct sk_buff *skb; | ||
| 380 | 346 | ||
| 381 | buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, | 347 | /* This really cannot happen... */ | 
| 382 | link_own_addr(link), link_own_addr(link), | 348 | if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { | 
| 383 | oport, 0, 0); | 349 | pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); | 
| 384 | if (!buf) | 350 | tipc_link_reset(link); | 
| 385 | return false; | 351 | goto err; | 
| 386 | TIPC_SKB_CB(buf)->chain_sz = chain_sz; | 352 | } | 
| 387 | TIPC_SKB_CB(buf)->chain_imp = imp; | 353 | /* Non-blocking sender: */ | 
| 388 | skb_queue_tail(&link->wakeupq, buf); | 354 | if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) | 
| 355 | return -ELINKCONG; | ||
| 356 | |||
| 357 | /* Create and schedule wakeup pseudo message */ | ||
| 358 | skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, | ||
| 359 | addr, addr, oport, 0, 0); | ||
| 360 | if (!skb) | ||
| 361 | goto err; | ||
| 362 | TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); | ||
| 363 | TIPC_SKB_CB(skb)->chain_imp = imp; | ||
| 364 | skb_queue_tail(&link->wakeupq, skb); | ||
| 389 | link->stats.link_congs++; | 365 | link->stats.link_congs++; | 
| 390 | return true; | 366 | return -ELINKCONG; | 
| 367 | err: | ||
| 368 | __skb_queue_purge(list); | ||
| 369 | return -ENOBUFS; | ||
| 391 | } | 370 | } | 
| 392 | 371 | ||
| 393 | /** | 372 | /** | 
| @@ -396,19 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, | |||
| 396 | * Move a number of waiting users, as permitted by available space in | 375 | * Move a number of waiting users, as permitted by available space in | 
| 397 | * the send queue, from link wait queue to node wait queue for wakeup | 376 | * the send queue, from link wait queue to node wait queue for wakeup | 
| 398 | */ | 377 | */ | 
| 399 | void link_prepare_wakeup(struct tipc_link *link) | 378 | void link_prepare_wakeup(struct tipc_link *l) | 
| 400 | { | 379 | { | 
| 401 | uint pend_qsz = skb_queue_len(&link->outqueue); | 380 | int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,}; | 
| 381 | int imp, lim; | ||
| 402 | struct sk_buff *skb, *tmp; | 382 | struct sk_buff *skb, *tmp; | 
| 403 | 383 | ||
| 404 | skb_queue_walk_safe(&link->wakeupq, skb, tmp) { | 384 | skb_queue_walk_safe(&l->wakeupq, skb, tmp) { | 
| 405 | if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp]) | 385 | imp = TIPC_SKB_CB(skb)->chain_imp; | 
| 386 | lim = l->window + l->backlog[imp].limit; | ||
| 387 | pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; | ||
| 388 | if ((pnd[imp] + l->backlog[imp].len) >= lim) | ||
| 406 | break; | 389 | break; | 
| 407 | pend_qsz += TIPC_SKB_CB(skb)->chain_sz; | 390 | skb_unlink(skb, &l->wakeupq); | 
| 408 | skb_unlink(skb, &link->wakeupq); | 391 | skb_queue_tail(&l->inputq, skb); | 
| 409 | skb_queue_tail(&link->inputq, skb); | 392 | l->owner->inputq = &l->inputq; | 
| 410 | link->owner->inputq = &link->inputq; | 393 | l->owner->action_flags |= TIPC_MSG_EVT; | 
| 411 | link->owner->action_flags |= TIPC_MSG_EVT; | ||
| 412 | } | 394 | } | 
| 413 | } | 395 | } | 
| 414 | 396 | ||
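link_prepare_wakeup() now performs its admission check per importance level: a waiting sender is moved to the input queue only while the packet chains already granted at that importance, plus the current backlog length at that level, stay below the link window plus the per-importance backlog limit. Reduced to plain arithmetic (a sketch with illustrative names, not the kernel function):

    static bool may_wake_sender(int window, int backlog_limit, int backlog_len,
                                int already_granted, int chain_sz)
    {
            int limit = window + backlog_limit;

            return already_granted + chain_sz + backlog_len < limit;
    }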
| @@ -422,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) | |||
| 422 | l_ptr->reasm_buf = NULL; | 404 | l_ptr->reasm_buf = NULL; | 
| 423 | } | 405 | } | 
| 424 | 406 | ||
| 407 | static void tipc_link_purge_backlog(struct tipc_link *l) | ||
| 408 | { | ||
| 409 | __skb_queue_purge(&l->backlogq); | ||
| 410 | l->backlog[TIPC_LOW_IMPORTANCE].len = 0; | ||
| 411 | l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; | ||
| 412 | l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; | ||
| 413 | l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; | ||
| 414 | l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; | ||
| 415 | } | ||
| 416 | |||
| 425 | /** | 417 | /** | 
| 426 | * tipc_link_purge_queues - purge all pkt queues associated with link | 418 | * tipc_link_purge_queues - purge all pkt queues associated with link | 
| 427 | * @l_ptr: pointer to link | 419 | * @l_ptr: pointer to link | 
| 428 | */ | 420 | */ | 
| 429 | void tipc_link_purge_queues(struct tipc_link *l_ptr) | 421 | void tipc_link_purge_queues(struct tipc_link *l_ptr) | 
| 430 | { | 422 | { | 
| 431 | __skb_queue_purge(&l_ptr->deferred_queue); | 423 | __skb_queue_purge(&l_ptr->deferdq); | 
| 432 | __skb_queue_purge(&l_ptr->outqueue); | 424 | __skb_queue_purge(&l_ptr->transmq); | 
| 425 | tipc_link_purge_backlog(l_ptr); | ||
| 433 | tipc_link_reset_fragments(l_ptr); | 426 | tipc_link_reset_fragments(l_ptr); | 
| 434 | } | 427 | } | 
| 435 | 428 | ||
| 436 | void tipc_link_reset(struct tipc_link *l_ptr) | 429 | void tipc_link_reset(struct tipc_link *l_ptr) | 
| 437 | { | 430 | { | 
| 438 | u32 prev_state = l_ptr->state; | 431 | u32 prev_state = l_ptr->state; | 
| 439 | u32 checkpoint = l_ptr->next_in_no; | ||
| 440 | int was_active_link = tipc_link_is_active(l_ptr); | 432 | int was_active_link = tipc_link_is_active(l_ptr); | 
| 441 | struct tipc_node *owner = l_ptr->owner; | 433 | struct tipc_node *owner = l_ptr->owner; | 
| 434 | struct tipc_link *pl = tipc_parallel_link(l_ptr); | ||
| 442 | 435 | ||
| 443 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); | 436 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); | 
| 444 | 437 | ||
| 445 | /* Link is down, accept any session */ | 438 | /* Link is down, accept any session */ | 
| 446 | l_ptr->peer_session = INVALID_SESSION; | 439 | l_ptr->peer_session = INVALID_SESSION; | 
| 447 | 440 | ||
| 448 | /* Prepare for max packet size negotiation */ | 441 | /* Prepare for renewed mtu size negotiation */ | 
| 449 | link_init_max_pkt(l_ptr); | 442 | l_ptr->mtu = l_ptr->advertised_mtu; | 
| 450 | 443 | ||
| 451 | l_ptr->state = RESET_UNKNOWN; | 444 | l_ptr->state = RESET_UNKNOWN; | 
| 452 | 445 | ||
| @@ -456,20 +449,26 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
| 456 | tipc_node_link_down(l_ptr->owner, l_ptr); | 449 | tipc_node_link_down(l_ptr->owner, l_ptr); | 
| 457 | tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); | 450 | tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); | 
| 458 | 451 | ||
| 459 | if (was_active_link && tipc_node_active_links(l_ptr->owner)) { | 452 | if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) { | 
| 460 | l_ptr->reset_checkpoint = checkpoint; | 453 | l_ptr->flags |= LINK_FAILINGOVER; | 
| 461 | l_ptr->exp_msg_count = START_CHANGEOVER; | 454 | l_ptr->failover_checkpt = l_ptr->next_in_no; | 
| 455 | pl->failover_pkts = FIRST_FAILOVER; | ||
| 456 | pl->failover_checkpt = l_ptr->next_in_no; | ||
| 457 | pl->failover_skb = l_ptr->reasm_buf; | ||
| 458 | } else { | ||
| 459 | kfree_skb(l_ptr->reasm_buf); | ||
| 462 | } | 460 | } | 
| 463 | |||
| 464 | /* Clean up all queues, except inputq: */ | 461 | /* Clean up all queues, except inputq: */ | 
| 465 | __skb_queue_purge(&l_ptr->outqueue); | 462 | __skb_queue_purge(&l_ptr->transmq); | 
| 466 | __skb_queue_purge(&l_ptr->deferred_queue); | 463 | __skb_queue_purge(&l_ptr->deferdq); | 
| 467 | skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); | 464 | if (!owner->inputq) | 
| 468 | if (!skb_queue_empty(&l_ptr->inputq)) | 465 | owner->inputq = &l_ptr->inputq; | 
| 466 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
| 467 | if (!skb_queue_empty(owner->inputq)) | ||
| 469 | owner->action_flags |= TIPC_MSG_EVT; | 468 | owner->action_flags |= TIPC_MSG_EVT; | 
| 470 | owner->inputq = &l_ptr->inputq; | 469 | tipc_link_purge_backlog(l_ptr); | 
| 471 | l_ptr->next_out = NULL; | 470 | l_ptr->reasm_buf = NULL; | 
| 472 | l_ptr->unacked_window = 0; | 471 | l_ptr->rcv_unacked = 0; | 
| 473 | l_ptr->checkpoint = 1; | 472 | l_ptr->checkpoint = 1; | 
| 474 | l_ptr->next_out_no = 1; | 473 | l_ptr->next_out_no = 1; | 
| 475 | l_ptr->fsm_msg_cnt = 0; | 474 | l_ptr->fsm_msg_cnt = 0; | 
| @@ -520,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 520 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) | 519 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) | 
| 521 | return; /* Not yet. */ | 520 | return; /* Not yet. */ | 
| 522 | 521 | ||
| 523 | /* Check whether changeover is going on */ | 522 | if (l_ptr->flags & LINK_FAILINGOVER) { | 
| 524 | if (l_ptr->exp_msg_count) { | ||
| 525 | if (event == TIMEOUT_EVT) | 523 | if (event == TIMEOUT_EVT) | 
| 526 | link_set_timer(l_ptr, cont_intv); | 524 | link_set_timer(l_ptr, cont_intv); | 
| 527 | return; | 525 | return; | 
| @@ -538,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 538 | l_ptr->checkpoint = l_ptr->next_in_no; | 536 | l_ptr->checkpoint = l_ptr->next_in_no; | 
| 539 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 537 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 
| 540 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 538 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 
| 541 | 0, 0, 0, 0, 0); | 539 | 0, 0, 0, 0); | 
| 542 | l_ptr->fsm_msg_cnt++; | ||
| 543 | } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { | ||
| 544 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | ||
| 545 | 1, 0, 0, 0, 0); | ||
| 546 | l_ptr->fsm_msg_cnt++; | 540 | l_ptr->fsm_msg_cnt++; | 
| 547 | } | 541 | } | 
| 548 | link_set_timer(l_ptr, cont_intv); | 542 | link_set_timer(l_ptr, cont_intv); | 
| @@ -550,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 550 | } | 544 | } | 
| 551 | l_ptr->state = WORKING_UNKNOWN; | 545 | l_ptr->state = WORKING_UNKNOWN; | 
| 552 | l_ptr->fsm_msg_cnt = 0; | 546 | l_ptr->fsm_msg_cnt = 0; | 
| 553 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 547 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); | 
| 554 | l_ptr->fsm_msg_cnt++; | 548 | l_ptr->fsm_msg_cnt++; | 
| 555 | link_set_timer(l_ptr, cont_intv / 4); | 549 | link_set_timer(l_ptr, cont_intv / 4); | 
| 556 | break; | 550 | break; | 
| @@ -561,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 561 | l_ptr->state = RESET_RESET; | 555 | l_ptr->state = RESET_RESET; | 
| 562 | l_ptr->fsm_msg_cnt = 0; | 556 | l_ptr->fsm_msg_cnt = 0; | 
| 563 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 557 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 
| 564 | 0, 0, 0, 0, 0); | 558 | 0, 0, 0, 0); | 
| 565 | l_ptr->fsm_msg_cnt++; | 559 | l_ptr->fsm_msg_cnt++; | 
| 566 | link_set_timer(l_ptr, cont_intv); | 560 | link_set_timer(l_ptr, cont_intv); | 
| 567 | break; | 561 | break; | 
| @@ -584,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 584 | l_ptr->state = RESET_RESET; | 578 | l_ptr->state = RESET_RESET; | 
| 585 | l_ptr->fsm_msg_cnt = 0; | 579 | l_ptr->fsm_msg_cnt = 0; | 
| 586 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 580 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 
| 587 | 0, 0, 0, 0, 0); | 581 | 0, 0, 0, 0); | 
| 588 | l_ptr->fsm_msg_cnt++; | 582 | l_ptr->fsm_msg_cnt++; | 
| 589 | link_set_timer(l_ptr, cont_intv); | 583 | link_set_timer(l_ptr, cont_intv); | 
| 590 | break; | 584 | break; | 
| @@ -595,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 595 | l_ptr->checkpoint = l_ptr->next_in_no; | 589 | l_ptr->checkpoint = l_ptr->next_in_no; | 
| 596 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 590 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 
| 597 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 591 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 
| 598 | 0, 0, 0, 0, 0); | 592 | 0, 0, 0, 0); | 
| 599 | l_ptr->fsm_msg_cnt++; | 593 | l_ptr->fsm_msg_cnt++; | 
| 600 | } | 594 | } | 
| 601 | link_set_timer(l_ptr, cont_intv); | 595 | link_set_timer(l_ptr, cont_intv); | 
| 602 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { | 596 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { | 
| 603 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 597 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 
| 604 | 1, 0, 0, 0, 0); | 598 | 1, 0, 0, 0); | 
| 605 | l_ptr->fsm_msg_cnt++; | 599 | l_ptr->fsm_msg_cnt++; | 
| 606 | link_set_timer(l_ptr, cont_intv / 4); | 600 | link_set_timer(l_ptr, cont_intv / 4); | 
| 607 | } else { /* Link has failed */ | 601 | } else { /* Link has failed */ | 
| @@ -611,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 611 | l_ptr->state = RESET_UNKNOWN; | 605 | l_ptr->state = RESET_UNKNOWN; | 
| 612 | l_ptr->fsm_msg_cnt = 0; | 606 | l_ptr->fsm_msg_cnt = 0; | 
| 613 | tipc_link_proto_xmit(l_ptr, RESET_MSG, | 607 | tipc_link_proto_xmit(l_ptr, RESET_MSG, | 
| 614 | 0, 0, 0, 0, 0); | 608 | 0, 0, 0, 0); | 
| 615 | l_ptr->fsm_msg_cnt++; | 609 | l_ptr->fsm_msg_cnt++; | 
| 616 | link_set_timer(l_ptr, cont_intv); | 610 | link_set_timer(l_ptr, cont_intv); | 
| 617 | } | 611 | } | 
| @@ -631,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 631 | l_ptr->state = WORKING_WORKING; | 625 | l_ptr->state = WORKING_WORKING; | 
| 632 | l_ptr->fsm_msg_cnt = 0; | 626 | l_ptr->fsm_msg_cnt = 0; | 
| 633 | link_activate(l_ptr); | 627 | link_activate(l_ptr); | 
| 634 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 628 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); | 
| 635 | l_ptr->fsm_msg_cnt++; | 629 | l_ptr->fsm_msg_cnt++; | 
| 636 | if (l_ptr->owner->working_links == 1) | 630 | if (l_ptr->owner->working_links == 1) | 
| 637 | tipc_link_sync_xmit(l_ptr); | 631 | tipc_link_sync_xmit(l_ptr); | 
| @@ -641,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 641 | l_ptr->state = RESET_RESET; | 635 | l_ptr->state = RESET_RESET; | 
| 642 | l_ptr->fsm_msg_cnt = 0; | 636 | l_ptr->fsm_msg_cnt = 0; | 
| 643 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 637 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 
| 644 | 1, 0, 0, 0, 0); | 638 | 1, 0, 0, 0); | 
| 645 | l_ptr->fsm_msg_cnt++; | 639 | l_ptr->fsm_msg_cnt++; | 
| 646 | link_set_timer(l_ptr, cont_intv); | 640 | link_set_timer(l_ptr, cont_intv); | 
| 647 | break; | 641 | break; | 
| @@ -651,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 651 | link_set_timer(l_ptr, cont_intv); | 645 | link_set_timer(l_ptr, cont_intv); | 
| 652 | break; | 646 | break; | 
| 653 | case TIMEOUT_EVT: | 647 | case TIMEOUT_EVT: | 
| 654 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 648 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0); | 
| 655 | l_ptr->fsm_msg_cnt++; | 649 | l_ptr->fsm_msg_cnt++; | 
| 656 | link_set_timer(l_ptr, cont_intv); | 650 | link_set_timer(l_ptr, cont_intv); | 
| 657 | break; | 651 | break; | 
| @@ -669,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 669 | l_ptr->state = WORKING_WORKING; | 663 | l_ptr->state = WORKING_WORKING; | 
| 670 | l_ptr->fsm_msg_cnt = 0; | 664 | l_ptr->fsm_msg_cnt = 0; | 
| 671 | link_activate(l_ptr); | 665 | link_activate(l_ptr); | 
| 672 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 666 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); | 
| 673 | l_ptr->fsm_msg_cnt++; | 667 | l_ptr->fsm_msg_cnt++; | 
| 674 | if (l_ptr->owner->working_links == 1) | 668 | if (l_ptr->owner->working_links == 1) | 
| 675 | tipc_link_sync_xmit(l_ptr); | 669 | tipc_link_sync_xmit(l_ptr); | 
| @@ -679,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 679 | break; | 673 | break; | 
| 680 | case TIMEOUT_EVT: | 674 | case TIMEOUT_EVT: | 
| 681 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 675 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 
| 682 | 0, 0, 0, 0, 0); | 676 | 0, 0, 0, 0); | 
| 683 | l_ptr->fsm_msg_cnt++; | 677 | l_ptr->fsm_msg_cnt++; | 
| 684 | link_set_timer(l_ptr, cont_intv); | 678 | link_set_timer(l_ptr, cont_intv); | 
| 685 | break; | 679 | break; | 
| @@ -692,101 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
| 692 | } | 686 | } | 
| 693 | } | 687 | } | 
| 694 | 688 | ||
| 695 | /* tipc_link_cong: determine return value and how to treat the | ||
| 696 | * sent buffer during link congestion. | ||
| 697 | * - For plain, errorless user data messages we keep the buffer and | ||
| 698 | * return -ELINKCONG. | ||

| 699 | * - For all other messages we discard the buffer and return -EHOSTUNREACH | ||
| 700 | * - For TIPC internal messages we also reset the link | ||
| 701 | */ | ||
| 702 | static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list) | ||
| 703 | { | ||
| 704 | struct sk_buff *skb = skb_peek(list); | ||
| 705 | struct tipc_msg *msg = buf_msg(skb); | ||
| 706 | uint imp = tipc_msg_tot_importance(msg); | ||
| 707 | u32 oport = msg_tot_origport(msg); | ||
| 708 | |||
| 709 | if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { | ||
| 710 | pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); | ||
| 711 | tipc_link_reset(link); | ||
| 712 | goto drop; | ||
| 713 | } | ||
| 714 | if (unlikely(msg_errcode(msg))) | ||
| 715 | goto drop; | ||
| 716 | if (unlikely(msg_reroute_cnt(msg))) | ||
| 717 | goto drop; | ||
| 718 | if (TIPC_SKB_CB(skb)->wakeup_pending) | ||
| 719 | return -ELINKCONG; | ||
| 720 | if (link_schedule_user(link, oport, skb_queue_len(list), imp)) | ||
| 721 | return -ELINKCONG; | ||
| 722 | drop: | ||
| 723 | __skb_queue_purge(list); | ||
| 724 | return -EHOSTUNREACH; | ||
| 725 | } | ||
| 726 | |||
| 727 | /** | 689 | /** | 
| 728 | * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked | 690 | * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked | 
| 729 | * @link: link to use | 691 | * @link: link to use | 
| 730 | * @list: chain of buffers containing message | 692 | * @list: chain of buffers containing message | 
| 731 | * | 693 | * | 
| 732 | * Consumes the buffer chain, except when returning -ELINKCONG | 694 | * Consumes the buffer chain, except when returning -ELINKCONG, | 
| 733 | * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket | 695 | * since the caller then may want to make more send attempts. | 
| 734 | * user data messages) or -EHOSTUNREACH (all other messages/senders) | 696 | * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS | 
| 735 | * Only the socket functions tipc_send_stream() and tipc_send_packet() need | 697 | * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted | 
| 736 | * to act on the return value, since they may need to do more send attempts. | ||
| 737 | */ | 698 | */ | 
| 738 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, | 699 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, | 
| 739 | struct sk_buff_head *list) | 700 | struct sk_buff_head *list) | 
| 740 | { | 701 | { | 
| 741 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | 702 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | 
| 742 | uint psz = msg_size(msg); | 703 | unsigned int maxwin = link->window; | 
| 743 | uint sndlim = link->queue_limit[0]; | 704 | unsigned int imp = msg_importance(msg); | 
| 744 | uint imp = tipc_msg_tot_importance(msg); | 705 | uint mtu = link->mtu; | 
| 745 | uint mtu = link->max_pkt; | ||
| 746 | uint ack = mod(link->next_in_no - 1); | 706 | uint ack = mod(link->next_in_no - 1); | 
| 747 | uint seqno = link->next_out_no; | 707 | uint seqno = link->next_out_no; | 
| 748 | uint bc_last_in = link->owner->bclink.last_in; | 708 | uint bc_last_in = link->owner->bclink.last_in; | 
| 749 | struct tipc_media_addr *addr = &link->media_addr; | 709 | struct tipc_media_addr *addr = &link->media_addr; | 
| 750 | struct sk_buff_head *outqueue = &link->outqueue; | 710 | struct sk_buff_head *transmq = &link->transmq; | 
| 711 | struct sk_buff_head *backlogq = &link->backlogq; | ||
| 751 | struct sk_buff *skb, *tmp; | 712 | struct sk_buff *skb, *tmp; | 
| 752 | 713 | ||
| 753 | /* Match queue limits against msg importance: */ | 714 | /* Match backlog limit against msg importance: */ | 
| 754 | if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) | 715 | if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit)) | 
| 755 | return tipc_link_cong(link, list); | 716 | return link_schedule_user(link, list); | 
| 756 | 717 | ||
| 757 | /* Has valid packet limit been used ? */ | 718 | if (unlikely(msg_size(msg) > mtu)) { | 
| 758 | if (unlikely(psz > mtu)) { | ||
| 759 | __skb_queue_purge(list); | 719 | __skb_queue_purge(list); | 
| 760 | return -EMSGSIZE; | 720 | return -EMSGSIZE; | 
| 761 | } | 721 | } | 
| 762 | 722 | /* Prepare each packet for sending, and add to relevant queue: */ | |
| 763 | /* Prepare each packet for sending, and add to outqueue: */ | ||
| 764 | skb_queue_walk_safe(list, skb, tmp) { | 723 | skb_queue_walk_safe(list, skb, tmp) { | 
| 765 | __skb_unlink(skb, list); | 724 | __skb_unlink(skb, list); | 
| 766 | msg = buf_msg(skb); | 725 | msg = buf_msg(skb); | 
| 767 | msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); | 726 | msg_set_seqno(msg, seqno); | 
| 727 | msg_set_ack(msg, ack); | ||
| 768 | msg_set_bcast_ack(msg, bc_last_in); | 728 | msg_set_bcast_ack(msg, bc_last_in); | 
| 769 | 729 | ||
| 770 | if (skb_queue_len(outqueue) < sndlim) { | 730 | if (likely(skb_queue_len(transmq) < maxwin)) { | 
| 771 | __skb_queue_tail(outqueue, skb); | 731 | __skb_queue_tail(transmq, skb); | 
| 772 | tipc_bearer_send(net, link->bearer_id, | 732 | tipc_bearer_send(net, link->bearer_id, skb, addr); | 
| 773 | skb, addr); | 733 | link->rcv_unacked = 0; | 
| 774 | link->next_out = NULL; | 734 | seqno++; | 
| 775 | link->unacked_window = 0; | 735 | continue; | 
| 776 | } else if (tipc_msg_bundle(outqueue, skb, mtu)) { | 736 | } | 
| 737 | if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) { | ||
| 777 | link->stats.sent_bundled++; | 738 | link->stats.sent_bundled++; | 
| 778 | continue; | 739 | continue; | 
| 779 | } else if (tipc_msg_make_bundle(outqueue, skb, mtu, | 740 | } | 
| 780 | link->addr)) { | 741 | if (tipc_msg_make_bundle(&skb, mtu, link->addr)) { | 
| 781 | link->stats.sent_bundled++; | 742 | link->stats.sent_bundled++; | 
| 782 | link->stats.sent_bundles++; | 743 | link->stats.sent_bundles++; | 
| 783 | if (!link->next_out) | 744 | imp = msg_importance(buf_msg(skb)); | 
| 784 | link->next_out = skb_peek_tail(outqueue); | ||
| 785 | } else { | ||
| 786 | __skb_queue_tail(outqueue, skb); | ||
| 787 | if (!link->next_out) | ||
| 788 | link->next_out = skb; | ||
| 789 | } | 745 | } | 
| 746 | __skb_queue_tail(backlogq, skb); | ||
| 747 | link->backlog[imp].len++; | ||
| 790 | seqno++; | 748 | seqno++; | 
| 791 | } | 749 | } | 
| 792 | link->next_out_no = seqno; | 750 | link->next_out_no = seqno; | 
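The reworked __tipc_link_xmit() above replaces the single outqueue with a transmit queue capped by the link window plus a backlog queue, bundling small messages into the backlog tail when possible. The per-packet decision reduces to the sketch below, where simple counters stand in for the real skb queues and try_bundle() stands in for tipc_msg_bundle() (all names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct link_model {
	int window;
	int transmq_len;     /* packets sent but not yet acked   */
	int backlogq_len;    /* packets waiting for window space */
	int sent_bundled;
};

/* Stand-in for tipc_msg_bundle(): succeed when the message fits into
 * the remaining room of the backlog tail packet.
 */
static bool try_bundle(int msg_size, int tail_room)
{
	return msg_size <= tail_room;
}

static void xmit_one(struct link_model *l, int msg_size, int tail_room)
{
	if (l->transmq_len < l->window) {
		l->transmq_len++;       /* kernel: queue on transmq + bearer send */
		return;
	}
	if (try_bundle(msg_size, tail_room)) {
		l->sent_bundled++;      /* merged into the last backlog packet */
		return;
	}
	l->backlogq_len++;              /* kernel: also bumps backlog[imp].len */
}

int main(void)
{
	struct link_model l = { .window = 2 };

	for (int i = 0; i < 5; i++)
		xmit_one(&l, 100, i % 2 ? 200 : 50);
	printf("transmq=%d backlog=%d bundled=%d\n",
	       l.transmq_len, l.backlogq_len, l.sent_bundled);
	return 0;
}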
| @@ -807,13 +765,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb) | |||
| 807 | return __tipc_link_xmit(link->owner->net, link, &head); | 765 | return __tipc_link_xmit(link->owner->net, link, &head); | 
| 808 | } | 766 | } | 
| 809 | 767 | ||
| 768 | /* tipc_link_xmit_skb(): send single buffer to destination | ||
| 769 | * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE | ||
| 770 | * messages, which will not be rejected. | ||
| 771 | * The only exceptions are datagram messages rerouted after secondary | ||
| 772 | * lookup, which are rare and safe to dispose of anyway. | ||
| 773 | * TODO: Return real return value, and let callers use | ||
| 774 | * tipc_wait_for_sendpkt() where applicable | ||
| 775 | */ | ||
| 810 | int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, | 776 | int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, | 
| 811 | u32 selector) | 777 | u32 selector) | 
| 812 | { | 778 | { | 
| 813 | struct sk_buff_head head; | 779 | struct sk_buff_head head; | 
| 780 | int rc; | ||
| 814 | 781 | ||
| 815 | skb2list(skb, &head); | 782 | skb2list(skb, &head); | 
| 816 | return tipc_link_xmit(net, &head, dnode, selector); | 783 | rc = tipc_link_xmit(net, &head, dnode, selector); | 
| 784 | if (rc == -ELINKCONG) | ||
| 785 | kfree_skb(skb); | ||
| 786 | return 0; | ||
| 817 | } | 787 | } | 
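The comment block above documents a deliberate contract: the single-buffer sender swallows -ELINKCONG, freeing the buffer and reporting success, because its callers send system-importance traffic with no retry path. A hedged sketch of that wrapper pattern, using plain malloc/free in place of skb handling:

#include <stdio.h>
#include <stdlib.h>

enum { RC_OK = 0, RC_ELINKCONG = -11 };  /* stand-in return codes */

/* Stand-in for tipc_link_xmit(); pretend the link is always congested. */
static int link_xmit(void *buf)
{
	(void)buf;
	return RC_ELINKCONG;
}

/* On congestion the buffer is dropped and 0 is returned, since callers
 * of the single-buffer path have no retry logic (see the TODO above).
 */
static int link_xmit_skb(void *buf)
{
	int rc = link_xmit(buf);

	if (rc == RC_ELINKCONG)
		free(buf);              /* kernel: kfree_skb(skb) */
	return 0;
}

int main(void)
{
	printf("rc=%d\n", link_xmit_skb(malloc(64)));
	return 0;
}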
| 818 | 788 | ||
| 819 | /** | 789 | /** | 
| @@ -840,12 +810,15 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode, | |||
| 840 | if (link) | 810 | if (link) | 
| 841 | rc = __tipc_link_xmit(net, link, list); | 811 | rc = __tipc_link_xmit(net, link, list); | 
| 842 | tipc_node_unlock(node); | 812 | tipc_node_unlock(node); | 
| 813 | tipc_node_put(node); | ||
| 843 | } | 814 | } | 
| 844 | if (link) | 815 | if (link) | 
| 845 | return rc; | 816 | return rc; | 
| 846 | 817 | ||
| 847 | if (likely(in_own_node(net, dnode))) | 818 | if (likely(in_own_node(net, dnode))) { | 
| 848 | return tipc_sk_rcv(net, list); | 819 | tipc_sk_rcv(net, list); | 
| 820 | return 0; | ||
| 821 | } | ||
| 849 | 822 | ||
| 850 | __skb_queue_purge(list); | 823 | __skb_queue_purge(list); | 
| 851 | return rc; | 824 | return rc; | 
| @@ -892,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) | |||
| 892 | kfree_skb(buf); | 865 | kfree_skb(buf); | 
| 893 | } | 866 | } | 
| 894 | 867 | ||
| 895 | struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, | ||
| 896 | const struct sk_buff *skb) | ||
| 897 | { | ||
| 898 | if (skb_queue_is_last(list, skb)) | ||
| 899 | return NULL; | ||
| 900 | return skb->next; | ||
| 901 | } | ||
| 902 | |||
| 903 | /* | 868 | /* | 
| 904 | * tipc_link_push_packets - push unsent packets to bearer | 869 | * tipc_link_push_packets - push unsent packets to bearer | 
| 905 | * | 870 | * | 
| @@ -908,30 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, | |||
| 908 | * | 873 | * | 
| 909 | * Called with node locked | 874 | * Called with node locked | 
| 910 | */ | 875 | */ | 
| 911 | void tipc_link_push_packets(struct tipc_link *l_ptr) | 876 | void tipc_link_push_packets(struct tipc_link *link) | 
| 912 | { | 877 | { | 
| 913 | struct sk_buff_head *outqueue = &l_ptr->outqueue; | 878 | struct sk_buff *skb; | 
| 914 | struct sk_buff *skb = l_ptr->next_out; | ||
| 915 | struct tipc_msg *msg; | 879 | struct tipc_msg *msg; | 
| 916 | u32 next, first; | 880 | unsigned int ack = mod(link->next_in_no - 1); | 
| 917 | 881 | ||
| 918 | skb_queue_walk_from(outqueue, skb) { | 882 | while (skb_queue_len(&link->transmq) < link->window) { | 
| 919 | msg = buf_msg(skb); | 883 | skb = __skb_dequeue(&link->backlogq); | 
| 920 | next = msg_seqno(msg); | 884 | if (!skb) | 
| 921 | first = buf_seqno(skb_peek(outqueue)); | ||
| 922 | |||
| 923 | if (mod(next - first) < l_ptr->queue_limit[0]) { | ||
| 924 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | ||
| 925 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | ||
| 926 | if (msg_user(msg) == MSG_BUNDLER) | ||
| 927 | TIPC_SKB_CB(skb)->bundling = false; | ||
| 928 | tipc_bearer_send(l_ptr->owner->net, | ||
| 929 | l_ptr->bearer_id, skb, | ||
| 930 | &l_ptr->media_addr); | ||
| 931 | l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); | ||
| 932 | } else { | ||
| 933 | break; | 885 | break; | 
| 934 | } | 886 | msg = buf_msg(skb); | 
| 887 | link->backlog[msg_importance(msg)].len--; | ||
| 888 | msg_set_ack(msg, ack); | ||
| 889 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); | ||
| 890 | link->rcv_unacked = 0; | ||
| 891 | __skb_queue_tail(&link->transmq, skb); | ||
| 892 | tipc_bearer_send(link->owner->net, link->bearer_id, | ||
| 893 | skb, &link->media_addr); | ||
| 935 | } | 894 | } | 
| 936 | } | 895 | } | 
| 937 | 896 | ||
| @@ -978,7 +937,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
| 978 | (unsigned long) TIPC_SKB_CB(buf)->handle); | 937 | (unsigned long) TIPC_SKB_CB(buf)->handle); | 
| 979 | 938 | ||
| 980 | n_ptr = tipc_bclink_retransmit_to(net); | 939 | n_ptr = tipc_bclink_retransmit_to(net); | 
| 981 | tipc_node_lock(n_ptr); | ||
| 982 | 940 | ||
| 983 | tipc_addr_string_fill(addr_string, n_ptr->addr); | 941 | tipc_addr_string_fill(addr_string, n_ptr->addr); | 
| 984 | pr_info("Broadcast link info for %s\n", addr_string); | 942 | pr_info("Broadcast link info for %s\n", addr_string); | 
| @@ -990,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
| 990 | n_ptr->bclink.oos_state, | 948 | n_ptr->bclink.oos_state, | 
| 991 | n_ptr->bclink.last_sent); | 949 | n_ptr->bclink.last_sent); | 
| 992 | 950 | ||
| 993 | tipc_node_unlock(n_ptr); | 951 | n_ptr->action_flags |= TIPC_BCAST_RESET; | 
| 994 | |||
| 995 | tipc_bclink_set_flags(net, TIPC_BCLINK_RESET); | ||
| 996 | l_ptr->stale_count = 0; | 952 | l_ptr->stale_count = 0; | 
| 997 | } | 953 | } | 
| 998 | } | 954 | } | 
| @@ -1018,8 +974,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, | |||
| 1018 | l_ptr->stale_count = 1; | 974 | l_ptr->stale_count = 1; | 
| 1019 | } | 975 | } | 
| 1020 | 976 | ||
| 1021 | skb_queue_walk_from(&l_ptr->outqueue, skb) { | 977 | skb_queue_walk_from(&l_ptr->transmq, skb) { | 
| 1022 | if (!retransmits || skb == l_ptr->next_out) | 978 | if (!retransmits) | 
| 1023 | break; | 979 | break; | 
| 1024 | msg = buf_msg(skb); | 980 | msg = buf_msg(skb); | 
| 1025 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 981 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 
| @@ -1031,72 +987,43 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, | |||
| 1031 | } | 987 | } | 
| 1032 | } | 988 | } | 
| 1033 | 989 | ||
| 1034 | static void link_retrieve_defq(struct tipc_link *link, | 990 | /* link_synch(): check if all packets that arrived before the synch | 
| 1035 | struct sk_buff_head *list) | 991 | * point have been consumed | 
| 1036 | { | 992 | * Returns true if the parallel links are synched, otherwise false | 
| 1037 | u32 seq_no; | ||
| 1038 | |||
| 1039 | if (skb_queue_empty(&link->deferred_queue)) | ||
| 1040 | return; | ||
| 1041 | |||
| 1042 | seq_no = buf_seqno(skb_peek(&link->deferred_queue)); | ||
| 1043 | if (seq_no == mod(link->next_in_no)) | ||
| 1044 | skb_queue_splice_tail_init(&link->deferred_queue, list); | ||
| 1045 | } | ||
| 1046 | |||
| 1047 | /** | ||
| 1048 | * link_recv_buf_validate - validate basic format of received message | ||
| 1049 | * | ||
| 1050 | * This routine ensures a TIPC message has an acceptable header, and at least | ||
| 1051 | * as much data as the header indicates it should. The routine also ensures | ||
| 1052 | * that the entire message header is stored in the main fragment of the message | ||
| 1053 | * buffer, to simplify future access to message header fields. | ||
| 1054 | * | ||
| 1055 | * Note: Having extra info present in the message header or data areas is OK. | ||
| 1056 | * TIPC will ignore the excess, under the assumption that it is optional info | ||
| 1057 | * introduced by a later release of the protocol. | ||
| 1058 | */ | 993 | */ | 
| 1059 | static int link_recv_buf_validate(struct sk_buff *buf) | 994 | static bool link_synch(struct tipc_link *l) | 
| 1060 | { | 995 | { | 
| 1061 | static u32 min_data_hdr_size[8] = { | 996 | unsigned int post_synch; | 
| 1062 | SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE, | 997 | struct tipc_link *pl; | 
| 1063 | MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE | ||
| 1064 | }; | ||
| 1065 | |||
| 1066 | struct tipc_msg *msg; | ||
| 1067 | u32 tipc_hdr[2]; | ||
| 1068 | u32 size; | ||
| 1069 | u32 hdr_size; | ||
| 1070 | u32 min_hdr_size; | ||
| 1071 | |||
| 1072 | /* If this packet comes from the defer queue, the skb has already | ||
| 1073 | * been validated | ||
| 1074 | */ | ||
| 1075 | if (unlikely(TIPC_SKB_CB(buf)->deferred)) | ||
| 1076 | return 1; | ||
| 1077 | 998 | ||
| 1078 | if (unlikely(buf->len < MIN_H_SIZE)) | 999 | pl = tipc_parallel_link(l); | 
| 1079 | return 0; | 1000 | if (pl == l) | 
| 1001 | goto synched; | ||
| 1080 | 1002 | ||
| 1081 | msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr); | 1003 | /* Was last pre-synch packet added to input queue ? */ | 
| 1082 | if (msg == NULL) | 1004 | if (less_eq(pl->next_in_no, l->synch_point)) | 
| 1083 | return 0; | 1005 | return false; | 
| 1084 | 1006 | ||
| 1085 | if (unlikely(msg_version(msg) != TIPC_VERSION)) | 1007 | /* Is it still in the input queue ? */ | 
| 1086 | return 0; | 1008 | post_synch = mod(pl->next_in_no - l->synch_point) - 1; | 
| 1009 | if (skb_queue_len(&pl->inputq) > post_synch) | ||
| 1010 | return false; | ||
| 1011 | synched: | ||
| 1012 | l->flags &= ~LINK_SYNCHING; | ||
| 1013 | return true; | ||
| 1014 | } | ||
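The synch test above is pure sequence-number arithmetic: the parallel link has consumed all pre-synch traffic once the number of packets it has received beyond the synch point is at least the number still sitting in its input queue. A small sketch of that check, using plain ints in place of the kernel's mod-2^16 sequence numbers:

#include <stdbool.h>
#include <stdio.h>

/* The parallel link pl has received packets up to (but not including)
 * next_in_no; packets up to and including synch_point belong to the
 * pre-synch era.
 */
struct plink { int next_in_no; int inputq_len; };

static bool links_synched(const struct plink *pl, int synch_point)
{
	int post_synch;

	/* Has the last pre-synch packet even been received yet? */
	if (pl->next_in_no <= synch_point)
		return false;

	/* Packets received after the synch point */
	post_synch = pl->next_in_no - synch_point - 1;

	/* If the input queue holds more than that, some pre-synch
	 * packets are still waiting to be consumed.
	 */
	return pl->inputq_len <= post_synch;
}

int main(void)
{
	struct plink pl = { .next_in_no = 108, .inputq_len = 5 };

	printf("synched=%d\n", links_synched(&pl, 100));  /* 7 post-synch, 5 queued */
	printf("synched=%d\n", links_synched(&pl, 104));  /* 3 post-synch, 5 queued */
	return 0;
}

In the first call, seven post-synch packets have arrived while only five remain queued, so every pre-synch packet must already have been consumed; in the second, five queued but only three post-synch means at least two pre-synch packets are still pending.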
| 1087 | 1015 | ||
| 1088 | size = msg_size(msg); | 1016 | static void link_retrieve_defq(struct tipc_link *link, | 
| 1089 | hdr_size = msg_hdr_sz(msg); | 1017 | struct sk_buff_head *list) | 
| 1090 | min_hdr_size = msg_isdata(msg) ? | 1018 | { | 
| 1091 | min_data_hdr_size[msg_type(msg)] : INT_H_SIZE; | 1019 | u32 seq_no; | 
| 1092 | 1020 | ||
| 1093 | if (unlikely((hdr_size < min_hdr_size) || | 1021 | if (skb_queue_empty(&link->deferdq)) | 
| 1094 | (size < hdr_size) || | 1022 | return; | 
| 1095 | (buf->len < size) || | ||
| 1096 | (size - hdr_size > TIPC_MAX_USER_MSG_SIZE))) | ||
| 1097 | return 0; | ||
| 1098 | 1023 | ||
| 1099 | return pskb_may_pull(buf, hdr_size); | 1024 | seq_no = buf_seqno(skb_peek(&link->deferdq)); | 
| 1025 | if (seq_no == mod(link->next_in_no)) | ||
| 1026 | skb_queue_splice_tail_init(&link->deferdq, list); | ||
| 1100 | } | 1027 | } | 
| 1101 | 1028 | ||
| 1102 | /** | 1029 | /** | 
| @@ -1124,16 +1051,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
| 1124 | 1051 | ||
| 1125 | while ((skb = __skb_dequeue(&head))) { | 1052 | while ((skb = __skb_dequeue(&head))) { | 
| 1126 | /* Ensure message is well-formed */ | 1053 | /* Ensure message is well-formed */ | 
| 1127 | if (unlikely(!link_recv_buf_validate(skb))) | 1054 | if (unlikely(!tipc_msg_validate(skb))) | 
| 1128 | goto discard; | ||
| 1129 | |||
| 1130 | /* Ensure message data is a single contiguous unit */ | ||
| 1131 | if (unlikely(skb_linearize(skb))) | ||
| 1132 | goto discard; | 1055 | goto discard; | 
| 1133 | 1056 | ||
| 1134 | /* Handle arrival of a non-unicast link message */ | 1057 | /* Handle arrival of a non-unicast link message */ | 
| 1135 | msg = buf_msg(skb); | 1058 | msg = buf_msg(skb); | 
| 1136 | |||
| 1137 | if (unlikely(msg_non_seq(msg))) { | 1059 | if (unlikely(msg_non_seq(msg))) { | 
| 1138 | if (msg_user(msg) == LINK_CONFIG) | 1060 | if (msg_user(msg) == LINK_CONFIG) | 
| 1139 | tipc_disc_rcv(net, skb, b_ptr); | 1061 | tipc_disc_rcv(net, skb, b_ptr); | 
| @@ -1151,8 +1073,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
| 1151 | n_ptr = tipc_node_find(net, msg_prevnode(msg)); | 1073 | n_ptr = tipc_node_find(net, msg_prevnode(msg)); | 
| 1152 | if (unlikely(!n_ptr)) | 1074 | if (unlikely(!n_ptr)) | 
| 1153 | goto discard; | 1075 | goto discard; | 
| 1154 | tipc_node_lock(n_ptr); | ||
| 1155 | 1076 | ||
| 1077 | tipc_node_lock(n_ptr); | ||
| 1156 | /* Locate unicast link endpoint that should handle message */ | 1078 | /* Locate unicast link endpoint that should handle message */ | 
| 1157 | l_ptr = n_ptr->links[b_ptr->identity]; | 1079 | l_ptr = n_ptr->links[b_ptr->identity]; | 
| 1158 | if (unlikely(!l_ptr)) | 1080 | if (unlikely(!l_ptr)) | 
| @@ -1174,21 +1096,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
| 1174 | ackd = msg_ack(msg); | 1096 | ackd = msg_ack(msg); | 
| 1175 | 1097 | ||
| 1176 | /* Release acked messages */ | 1098 | /* Release acked messages */ | 
| 1177 | if (n_ptr->bclink.recv_permitted) | 1099 | if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg))) | 
| 1178 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); | 1100 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); | 
| 1179 | 1101 | ||
| 1180 | released = 0; | 1102 | released = 0; | 
| 1181 | skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) { | 1103 | skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) { | 
| 1182 | if (skb1 == l_ptr->next_out || | 1104 | if (more(buf_seqno(skb1), ackd)) | 
| 1183 | more(buf_seqno(skb1), ackd)) | ||
| 1184 | break; | 1105 | break; | 
| 1185 | __skb_unlink(skb1, &l_ptr->outqueue); | 1106 | __skb_unlink(skb1, &l_ptr->transmq); | 
| 1186 | kfree_skb(skb1); | 1107 | kfree_skb(skb1); | 
| 1187 | released = 1; | 1108 | released = 1; | 
| 1188 | } | 1109 | } | 
| 1189 | 1110 | ||
| 1190 | /* Try sending any messages link endpoint has pending */ | 1111 | /* Try sending any messages link endpoint has pending */ | 
| 1191 | if (unlikely(l_ptr->next_out)) | 1112 | if (unlikely(skb_queue_len(&l_ptr->backlogq))) | 
| 1192 | tipc_link_push_packets(l_ptr); | 1113 | tipc_link_push_packets(l_ptr); | 
| 1193 | 1114 | ||
| 1194 | if (released && !skb_queue_empty(&l_ptr->wakeupq)) | 1115 | if (released && !skb_queue_empty(&l_ptr->wakeupq)) | 
| @@ -1222,18 +1143,23 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
| 1222 | skb = NULL; | 1143 | skb = NULL; | 
| 1223 | goto unlock; | 1144 | goto unlock; | 
| 1224 | } | 1145 | } | 
| 1146 | /* Synchronize with parallel link if applicable */ | ||
| 1147 | if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { | ||
| 1148 | if (!link_synch(l_ptr)) | ||
| 1149 | goto unlock; | ||
| 1150 | } | ||
| 1225 | l_ptr->next_in_no++; | 1151 | l_ptr->next_in_no++; | 
| 1226 | if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) | 1152 | if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) | 
| 1227 | link_retrieve_defq(l_ptr, &head); | 1153 | link_retrieve_defq(l_ptr, &head); | 
| 1228 | 1154 | if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { | |
| 1229 | if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { | ||
| 1230 | l_ptr->stats.sent_acks++; | 1155 | l_ptr->stats.sent_acks++; | 
| 1231 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1156 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); | 
| 1232 | } | 1157 | } | 
| 1233 | tipc_link_input(l_ptr, skb); | 1158 | tipc_link_input(l_ptr, skb); | 
| 1234 | skb = NULL; | 1159 | skb = NULL; | 
| 1235 | unlock: | 1160 | unlock: | 
| 1236 | tipc_node_unlock(n_ptr); | 1161 | tipc_node_unlock(n_ptr); | 
| 1162 | tipc_node_put(n_ptr); | ||
| 1237 | discard: | 1163 | discard: | 
| 1238 | if (unlikely(skb)) | 1164 | if (unlikely(skb)) | 
| 1239 | kfree_skb(skb); | 1165 | kfree_skb(skb); | 
| @@ -1270,7 +1196,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb) | |||
| 1270 | node->action_flags |= TIPC_NAMED_MSG_EVT; | 1196 | node->action_flags |= TIPC_NAMED_MSG_EVT; | 
| 1271 | return true; | 1197 | return true; | 
| 1272 | case MSG_BUNDLER: | 1198 | case MSG_BUNDLER: | 
| 1273 | case CHANGEOVER_PROTOCOL: | 1199 | case TUNNEL_PROTOCOL: | 
| 1274 | case MSG_FRAGMENTER: | 1200 | case MSG_FRAGMENTER: | 
| 1275 | case BCAST_PROTOCOL: | 1201 | case BCAST_PROTOCOL: | 
| 1276 | return false; | 1202 | return false; | 
| @@ -1297,8 +1223,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb) | |||
| 1297 | return; | 1223 | return; | 
| 1298 | 1224 | ||
| 1299 | switch (msg_user(msg)) { | 1225 | switch (msg_user(msg)) { | 
| 1300 | case CHANGEOVER_PROTOCOL: | 1226 | case TUNNEL_PROTOCOL: | 
| 1301 | if (!tipc_link_tunnel_rcv(node, &skb)) | 1227 | if (msg_dup(msg)) { | 
| 1228 | link->flags |= LINK_SYNCHING; | ||
| 1229 | link->synch_point = msg_seqno(msg_get_wrapped(msg)); | ||
| 1230 | kfree_skb(skb); | ||
| 1231 | break; | ||
| 1232 | } | ||
| 1233 | if (!tipc_link_failover_rcv(link, &skb)) | ||
| 1302 | break; | 1234 | break; | 
| 1303 | if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { | 1235 | if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { | 
| 1304 | tipc_data_input(link, skb); | 1236 | tipc_data_input(link, skb); | 
| @@ -1393,11 +1325,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
| 1393 | return; | 1325 | return; | 
| 1394 | } | 1326 | } | 
| 1395 | 1327 | ||
| 1396 | if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) { | 1328 | if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { | 
| 1397 | l_ptr->stats.deferred_recv++; | 1329 | l_ptr->stats.deferred_recv++; | 
| 1398 | TIPC_SKB_CB(buf)->deferred = true; | 1330 | if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) | 
| 1399 | if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1) | 1331 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); | 
| 1400 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | ||
| 1401 | } else { | 1332 | } else { | 
| 1402 | l_ptr->stats.duplicates++; | 1333 | l_ptr->stats.duplicates++; | 
| 1403 | } | 1334 | } | 
| @@ -1407,15 +1338,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
| 1407 | * Send protocol message to the other endpoint. | 1338 | * Send protocol message to the other endpoint. | 
| 1408 | */ | 1339 | */ | 
| 1409 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | 1340 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | 
| 1410 | u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) | 1341 | u32 gap, u32 tolerance, u32 priority) | 
| 1411 | { | 1342 | { | 
| 1412 | struct sk_buff *buf = NULL; | 1343 | struct sk_buff *buf = NULL; | 
| 1413 | struct tipc_msg *msg = l_ptr->pmsg; | 1344 | struct tipc_msg *msg = l_ptr->pmsg; | 
| 1414 | u32 msg_size = sizeof(l_ptr->proto_msg); | 1345 | u32 msg_size = sizeof(l_ptr->proto_msg); | 
| 1415 | int r_flag; | 1346 | int r_flag; | 
| 1416 | 1347 | ||
| 1417 | /* Don't send protocol message during link changeover */ | 1348 | /* Don't send protocol message during link failover */ | 
| 1418 | if (l_ptr->exp_msg_count) | 1349 | if (l_ptr->flags & LINK_FAILINGOVER) | 
| 1419 | return; | 1350 | return; | 
| 1420 | 1351 | ||
| 1421 | /* Abort non-RESET send if communication with node is prohibited */ | 1352 | /* Abort non-RESET send if communication with node is prohibited */ | 
| @@ -1433,11 +1364,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1433 | 1364 | ||
| 1434 | if (!tipc_link_is_up(l_ptr)) | 1365 | if (!tipc_link_is_up(l_ptr)) | 
| 1435 | return; | 1366 | return; | 
| 1436 | if (l_ptr->next_out) | 1367 | if (skb_queue_len(&l_ptr->backlogq)) | 
| 1437 | next_sent = buf_seqno(l_ptr->next_out); | 1368 | next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); | 
| 1438 | msg_set_next_sent(msg, next_sent); | 1369 | msg_set_next_sent(msg, next_sent); | 
| 1439 | if (!skb_queue_empty(&l_ptr->deferred_queue)) { | 1370 | if (!skb_queue_empty(&l_ptr->deferdq)) { | 
| 1440 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); | 1371 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq)); | 
| 1441 | gap = mod(rec - mod(l_ptr->next_in_no)); | 1372 | gap = mod(rec - mod(l_ptr->next_in_no)); | 
| 1442 | } | 1373 | } | 
| 1443 | msg_set_seq_gap(msg, gap); | 1374 | msg_set_seq_gap(msg, gap); | 
| @@ -1445,35 +1376,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1445 | l_ptr->stats.sent_nacks++; | 1376 | l_ptr->stats.sent_nacks++; | 
| 1446 | msg_set_link_tolerance(msg, tolerance); | 1377 | msg_set_link_tolerance(msg, tolerance); | 
| 1447 | msg_set_linkprio(msg, priority); | 1378 | msg_set_linkprio(msg, priority); | 
| 1448 | msg_set_max_pkt(msg, ack_mtu); | 1379 | msg_set_max_pkt(msg, l_ptr->mtu); | 
| 1449 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1380 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 
| 1450 | msg_set_probe(msg, probe_msg != 0); | 1381 | msg_set_probe(msg, probe_msg != 0); | 
| 1451 | if (probe_msg) { | 1382 | if (probe_msg) | 
| 1452 | u32 mtu = l_ptr->max_pkt; | ||
| 1453 | |||
| 1454 | if ((mtu < l_ptr->max_pkt_target) && | ||
| 1455 | link_working_working(l_ptr) && | ||
| 1456 | l_ptr->fsm_msg_cnt) { | ||
| 1457 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
| 1458 | if (l_ptr->max_pkt_probes == 10) { | ||
| 1459 | l_ptr->max_pkt_target = (msg_size - 4); | ||
| 1460 | l_ptr->max_pkt_probes = 0; | ||
| 1461 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
| 1462 | } | ||
| 1463 | l_ptr->max_pkt_probes++; | ||
| 1464 | } | ||
| 1465 | |||
| 1466 | l_ptr->stats.sent_probes++; | 1383 | l_ptr->stats.sent_probes++; | 
| 1467 | } | ||
| 1468 | l_ptr->stats.sent_states++; | 1384 | l_ptr->stats.sent_states++; | 
| 1469 | } else { /* RESET_MSG or ACTIVATE_MSG */ | 1385 | } else { /* RESET_MSG or ACTIVATE_MSG */ | 
| 1470 | msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); | 1386 | msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1)); | 
| 1471 | msg_set_seq_gap(msg, 0); | 1387 | msg_set_seq_gap(msg, 0); | 
| 1472 | msg_set_next_sent(msg, 1); | 1388 | msg_set_next_sent(msg, 1); | 
| 1473 | msg_set_probe(msg, 0); | 1389 | msg_set_probe(msg, 0); | 
| 1474 | msg_set_link_tolerance(msg, l_ptr->tolerance); | 1390 | msg_set_link_tolerance(msg, l_ptr->tolerance); | 
| 1475 | msg_set_linkprio(msg, l_ptr->priority); | 1391 | msg_set_linkprio(msg, l_ptr->priority); | 
| 1476 | msg_set_max_pkt(msg, l_ptr->max_pkt_target); | 1392 | msg_set_max_pkt(msg, l_ptr->advertised_mtu); | 
| 1477 | } | 1393 | } | 
| 1478 | 1394 | ||
| 1479 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); | 1395 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); | 
| @@ -1489,10 +1405,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1489 | 1405 | ||
| 1490 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); | 1406 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); | 
| 1491 | buf->priority = TC_PRIO_CONTROL; | 1407 | buf->priority = TC_PRIO_CONTROL; | 
| 1492 | |||
| 1493 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, | 1408 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, | 
| 1494 | &l_ptr->media_addr); | 1409 | &l_ptr->media_addr); | 
| 1495 | l_ptr->unacked_window = 0; | 1410 | l_ptr->rcv_unacked = 0; | 
| 1496 | kfree_skb(buf); | 1411 | kfree_skb(buf); | 
| 1497 | } | 1412 | } | 
| 1498 | 1413 | ||
| @@ -1505,13 +1420,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1505 | struct sk_buff *buf) | 1420 | struct sk_buff *buf) | 
| 1506 | { | 1421 | { | 
| 1507 | u32 rec_gap = 0; | 1422 | u32 rec_gap = 0; | 
| 1508 | u32 max_pkt_info; | ||
| 1509 | u32 max_pkt_ack; | ||
| 1510 | u32 msg_tol; | 1423 | u32 msg_tol; | 
| 1511 | struct tipc_msg *msg = buf_msg(buf); | 1424 | struct tipc_msg *msg = buf_msg(buf); | 
| 1512 | 1425 | ||
| 1513 | /* Discard protocol message during link changeover */ | 1426 | if (l_ptr->flags & LINK_FAILINGOVER) | 
| 1514 | if (l_ptr->exp_msg_count) | ||
| 1515 | goto exit; | 1427 | goto exit; | 
| 1516 | 1428 | ||
| 1517 | if (l_ptr->net_plane != msg_net_plane(msg)) | 1429 | if (l_ptr->net_plane != msg_net_plane(msg)) | 
| @@ -1550,15 +1462,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1550 | if (msg_linkprio(msg) > l_ptr->priority) | 1462 | if (msg_linkprio(msg) > l_ptr->priority) | 
| 1551 | l_ptr->priority = msg_linkprio(msg); | 1463 | l_ptr->priority = msg_linkprio(msg); | 
| 1552 | 1464 | ||
| 1553 | max_pkt_info = msg_max_pkt(msg); | 1465 | if (l_ptr->mtu > msg_max_pkt(msg)) | 
| 1554 | if (max_pkt_info) { | 1466 | l_ptr->mtu = msg_max_pkt(msg); | 
| 1555 | if (max_pkt_info < l_ptr->max_pkt_target) | ||
| 1556 | l_ptr->max_pkt_target = max_pkt_info; | ||
| 1557 | if (l_ptr->max_pkt > l_ptr->max_pkt_target) | ||
| 1558 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
| 1559 | } else { | ||
| 1560 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
| 1561 | } | ||
| 1562 | 1467 | ||
| 1563 | /* Synchronize broadcast link info, if not done previously */ | 1468 | /* Synchronize broadcast link info, if not done previously */ | 
| 1564 | if (!tipc_node_is_up(l_ptr->owner)) { | 1469 | if (!tipc_node_is_up(l_ptr->owner)) { | 
| @@ -1603,18 +1508,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1603 | mod(l_ptr->next_in_no)); | 1508 | mod(l_ptr->next_in_no)); | 
| 1604 | } | 1509 | } | 
| 1605 | 1510 | ||
| 1606 | max_pkt_ack = msg_max_pkt(msg); | 1511 | if (msg_probe(msg)) | 
| 1607 | if (max_pkt_ack > l_ptr->max_pkt) { | ||
| 1608 | l_ptr->max_pkt = max_pkt_ack; | ||
| 1609 | l_ptr->max_pkt_probes = 0; | ||
| 1610 | } | ||
| 1611 | |||
| 1612 | max_pkt_ack = 0; | ||
| 1613 | if (msg_probe(msg)) { | ||
| 1614 | l_ptr->stats.recv_probes++; | 1512 | l_ptr->stats.recv_probes++; | 
| 1615 | if (msg_size(msg) > sizeof(l_ptr->proto_msg)) | ||
| 1616 | max_pkt_ack = msg_size(msg); | ||
| 1617 | } | ||
| 1618 | 1513 | ||
| 1619 | /* Protocol message before retransmits, reduce loss risk */ | 1514 | /* Protocol message before retransmits, reduce loss risk */ | 
| 1620 | if (l_ptr->owner->bclink.recv_permitted) | 1515 | if (l_ptr->owner->bclink.recv_permitted) | 
| @@ -1622,12 +1517,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1622 | msg_last_bcast(msg)); | 1517 | msg_last_bcast(msg)); | 
| 1623 | 1518 | ||
| 1624 | if (rec_gap || (msg_probe(msg))) { | 1519 | if (rec_gap || (msg_probe(msg))) { | 
| 1625 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, | 1520 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, | 
| 1626 | 0, max_pkt_ack); | 1521 | rec_gap, 0, 0); | 
| 1627 | } | 1522 | } | 
| 1628 | if (msg_seq_gap(msg)) { | 1523 | if (msg_seq_gap(msg)) { | 
| 1629 | l_ptr->stats.recv_nacks++; | 1524 | l_ptr->stats.recv_nacks++; | 
| 1630 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), | 1525 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq), | 
| 1631 | msg_seq_gap(msg)); | 1526 | msg_seq_gap(msg)); | 
| 1632 | } | 1527 | } | 
| 1633 | break; | 1528 | break; | 
| @@ -1674,7 +1569,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, | |||
| 1674 | */ | 1569 | */ | 
| 1675 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | 1570 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | 
| 1676 | { | 1571 | { | 
| 1677 | u32 msgcount = skb_queue_len(&l_ptr->outqueue); | 1572 | int msgcount; | 
| 1678 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; | 1573 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; | 
| 1679 | struct tipc_msg tunnel_hdr; | 1574 | struct tipc_msg tunnel_hdr; | 
| 1680 | struct sk_buff *skb; | 1575 | struct sk_buff *skb; | 
| @@ -1683,12 +1578,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1683 | if (!tunnel) | 1578 | if (!tunnel) | 
| 1684 | return; | 1579 | return; | 
| 1685 | 1580 | ||
| 1686 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1581 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, | 
| 1687 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 1582 | FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); | 
| 1583 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); | ||
| 1584 | tipc_link_purge_backlog(l_ptr); | ||
| 1585 | msgcount = skb_queue_len(&l_ptr->transmq); | ||
| 1688 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1586 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 
| 1689 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 1587 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 
| 1690 | 1588 | ||
| 1691 | if (skb_queue_empty(&l_ptr->outqueue)) { | 1589 | if (skb_queue_empty(&l_ptr->transmq)) { | 
| 1692 | skb = tipc_buf_acquire(INT_H_SIZE); | 1590 | skb = tipc_buf_acquire(INT_H_SIZE); | 
| 1693 | if (skb) { | 1591 | if (skb) { | 
| 1694 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); | 1592 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); | 
| @@ -1704,7 +1602,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1704 | split_bundles = (l_ptr->owner->active_links[0] != | 1602 | split_bundles = (l_ptr->owner->active_links[0] != | 
| 1705 | l_ptr->owner->active_links[1]); | 1603 | l_ptr->owner->active_links[1]); | 
| 1706 | 1604 | ||
| 1707 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1605 | skb_queue_walk(&l_ptr->transmq, skb) { | 
| 1708 | struct tipc_msg *msg = buf_msg(skb); | 1606 | struct tipc_msg *msg = buf_msg(skb); | 
| 1709 | 1607 | ||
| 1710 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 1608 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 
| @@ -1735,157 +1633,105 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1735 | * and sequence order is preserved per sender/receiver socket pair. | 1633 | * and sequence order is preserved per sender/receiver socket pair. | 
| 1736 | * Owner node is locked. | 1634 | * Owner node is locked. | 
| 1737 | */ | 1635 | */ | 
| 1738 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, | 1636 | void tipc_link_dup_queue_xmit(struct tipc_link *link, | 
| 1739 | struct tipc_link *tunnel) | 1637 | struct tipc_link *tnl) | 
| 1740 | { | 1638 | { | 
| 1741 | struct sk_buff *skb; | 1639 | struct sk_buff *skb; | 
| 1742 | struct tipc_msg tunnel_hdr; | 1640 | struct tipc_msg tnl_hdr; | 
| 1743 | 1641 | struct sk_buff_head *queue = &link->transmq; | |
| 1744 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1642 | int mcnt; | 
| 1745 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); | 1643 | |
| 1746 | msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); | 1644 | tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, | 
| 1747 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1645 | SYNCH_MSG, INT_H_SIZE, link->addr); | 
| 1748 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1646 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); | 
| 1647 | msg_set_msgcnt(&tnl_hdr, mcnt); | ||
| 1648 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); | ||
| 1649 | |||
| 1650 | tunnel_queue: | ||
| 1651 | skb_queue_walk(queue, skb) { | ||
| 1749 | struct sk_buff *outskb; | 1652 | struct sk_buff *outskb; | 
| 1750 | struct tipc_msg *msg = buf_msg(skb); | 1653 | struct tipc_msg *msg = buf_msg(skb); | 
| 1751 | u32 length = msg_size(msg); | 1654 | u32 len = msg_size(msg); | 
| 1752 | 1655 | ||
| 1753 | if (msg_user(msg) == MSG_BUNDLER) | 1656 | msg_set_ack(msg, mod(link->next_in_no - 1)); | 
| 1754 | msg_set_type(msg, CLOSED_MSG); | 1657 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); | 
| 1755 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ | 1658 | msg_set_size(&tnl_hdr, len + INT_H_SIZE); | 
| 1756 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1659 | outskb = tipc_buf_acquire(len + INT_H_SIZE); | 
| 1757 | msg_set_size(&tunnel_hdr, length + INT_H_SIZE); | ||
| 1758 | outskb = tipc_buf_acquire(length + INT_H_SIZE); | ||
| 1759 | if (outskb == NULL) { | 1660 | if (outskb == NULL) { | 
| 1760 | pr_warn("%sunable to send duplicate msg\n", | 1661 | pr_warn("%sunable to send duplicate msg\n", | 
| 1761 | link_co_err); | 1662 | link_co_err); | 
| 1762 | return; | 1663 | return; | 
| 1763 | } | 1664 | } | 
| 1764 | skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); | 1665 | skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE); | 
| 1765 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, | 1666 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, | 
| 1766 | length); | 1667 | skb->data, len); | 
| 1767 | __tipc_link_xmit_skb(tunnel, outskb); | 1668 | __tipc_link_xmit_skb(tnl, outskb); | 
| 1768 | if (!tipc_link_is_up(l_ptr)) | 1669 | if (!tipc_link_is_up(link)) | 
| 1769 | return; | 1670 | return; | 
| 1770 | } | 1671 | } | 
| 1771 | } | 1672 | if (queue == &link->backlogq) | 
| 1772 | |||
| 1773 | /** | ||
| 1774 | * buf_extract - extracts embedded TIPC message from another message | ||
| 1775 | * @skb: encapsulating message buffer | ||
| 1776 | * @from_pos: offset to extract from | ||
| 1777 | * | ||
| 1778 | * Returns a new message buffer containing an embedded message. The | ||
| 1779 | * encapsulating buffer is left unchanged. | ||
| 1780 | */ | ||
| 1781 | static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) | ||
| 1782 | { | ||
| 1783 | struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); | ||
| 1784 | u32 size = msg_size(msg); | ||
| 1785 | struct sk_buff *eb; | ||
| 1786 | |||
| 1787 | eb = tipc_buf_acquire(size); | ||
| 1788 | if (eb) | ||
| 1789 | skb_copy_to_linear_data(eb, msg, size); | ||
| 1790 | return eb; | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. | ||
| 1794 | * Owner node is locked. | ||
| 1795 | */ | ||
| 1796 | static void tipc_link_dup_rcv(struct tipc_link *l_ptr, | ||
| 1797 | struct sk_buff *t_buf) | ||
| 1798 | { | ||
| 1799 | struct sk_buff *buf; | ||
| 1800 | |||
| 1801 | if (!tipc_link_is_up(l_ptr)) | ||
| 1802 | return; | 1673 | return; | 
| 1803 | 1674 | queue = &link->backlogq; | |
| 1804 | buf = buf_extract(t_buf, INT_H_SIZE); | 1675 | goto tunnel_queue; | 
| 1805 | if (buf == NULL) { | ||
| 1806 | pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); | ||
| 1807 | return; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | /* Add buffer to deferred queue, if applicable: */ | ||
| 1811 | link_handle_out_of_seq_msg(l_ptr, buf); | ||
| 1812 | } | 1676 | } | 
| 1813 | 1677 | ||
| 1814 | /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet | 1678 | /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet | 
| 1815 | * Owner node is locked. | 1679 | * Owner node is locked. | 
| 1816 | */ | 1680 | */ | 
| 1817 | static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, | 1681 | static bool tipc_link_failover_rcv(struct tipc_link *link, | 
| 1818 | struct sk_buff *t_buf) | 1682 | struct sk_buff **skb) | 
| 1819 | { | 1683 | { | 
| 1820 | struct tipc_msg *t_msg = buf_msg(t_buf); | 1684 | struct tipc_msg *msg = buf_msg(*skb); | 
| 1821 | struct sk_buff *buf = NULL; | 1685 | struct sk_buff *iskb = NULL; | 
| 1822 | struct tipc_msg *msg; | 1686 | struct tipc_link *pl = NULL; | 
| 1823 | 1687 | int bearer_id = msg_bearer_id(msg); | |
| 1824 | if (tipc_link_is_up(l_ptr)) | 1688 | int pos = 0; | 
| 1825 | tipc_link_reset(l_ptr); | ||
| 1826 | |||
| 1827 | /* First failover packet? */ | ||
| 1828 | if (l_ptr->exp_msg_count == START_CHANGEOVER) | ||
| 1829 | l_ptr->exp_msg_count = msg_msgcnt(t_msg); | ||
| 1830 | |||
| 1831 | /* Should there be an inner packet? */ | ||
| 1832 | if (l_ptr->exp_msg_count) { | ||
| 1833 | l_ptr->exp_msg_count--; | ||
| 1834 | buf = buf_extract(t_buf, INT_H_SIZE); | ||
| 1835 | if (buf == NULL) { | ||
| 1836 | pr_warn("%sno inner failover pkt\n", link_co_err); | ||
| 1837 | goto exit; | ||
| 1838 | } | ||
| 1839 | msg = buf_msg(buf); | ||
| 1840 | 1689 | ||
| 1841 | if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { | 1690 | if (msg_type(msg) != FAILOVER_MSG) { | 
| 1842 | kfree_skb(buf); | 1691 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 
| 1843 | buf = NULL; | 1692 | goto exit; | 
| 1844 | goto exit; | ||
| 1845 | } | ||
| 1846 | if (msg_user(msg) == MSG_FRAGMENTER) { | ||
| 1847 | l_ptr->stats.recv_fragments++; | ||
| 1848 | tipc_buf_append(&l_ptr->reasm_buf, &buf); | ||
| 1849 | } | ||
| 1850 | } | 1693 | } | 
| 1851 | exit: | 1694 | if (bearer_id >= MAX_BEARERS) | 
| 1852 | if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED)) | 1695 | goto exit; | 
| 1853 | tipc_link_delete(l_ptr); | ||
| 1854 | return buf; | ||
| 1855 | } | ||
| 1856 | 1696 | ||
| 1857 | /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent | 1697 | if (bearer_id == link->bearer_id) | 
| 1858 | * via other link as result of a failover (ORIGINAL_MSG) or | 1698 | goto exit; | 
| 1859 | * a new active link (DUPLICATE_MSG). Failover packets are | ||
| 1860 | * returned to the active link for delivery upwards. | ||
| 1861 | * Owner node is locked. | ||
| 1862 | */ | ||
| 1863 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
| 1864 | struct sk_buff **buf) | ||
| 1865 | { | ||
| 1866 | struct sk_buff *t_buf = *buf; | ||
| 1867 | struct tipc_link *l_ptr; | ||
| 1868 | struct tipc_msg *t_msg = buf_msg(t_buf); | ||
| 1869 | u32 bearer_id = msg_bearer_id(t_msg); | ||
| 1870 | 1699 | ||
| 1871 | *buf = NULL; | 1700 | pl = link->owner->links[bearer_id]; | 
| 1701 | if (pl && tipc_link_is_up(pl)) | ||
| 1702 | tipc_link_reset(pl); | ||
| 1872 | 1703 | ||
| 1873 | if (bearer_id >= MAX_BEARERS) | 1704 | if (link->failover_pkts == FIRST_FAILOVER) | 
| 1705 | link->failover_pkts = msg_msgcnt(msg); | ||
| 1706 | |||
| 1707 | /* Should we expect an inner packet? */ | ||
| 1708 | if (!link->failover_pkts) | ||
| 1874 | goto exit; | 1709 | goto exit; | 
| 1875 | 1710 | ||
| 1876 | l_ptr = n_ptr->links[bearer_id]; | 1711 | if (!tipc_msg_extract(*skb, &iskb, &pos)) { | 
| 1877 | if (!l_ptr) | 1712 | pr_warn("%sno inner failover pkt\n", link_co_err); | 
| 1713 | *skb = NULL; | ||
| 1878 | goto exit; | 1714 | goto exit; | 
| 1715 | } | ||
| 1716 | link->failover_pkts--; | ||
| 1717 | *skb = NULL; | ||
| 1879 | 1718 | ||
| 1880 | if (msg_type(t_msg) == DUPLICATE_MSG) | 1719 | /* Was this packet already delivered? */ | 
| 1881 | tipc_link_dup_rcv(l_ptr, t_buf); | 1720 | if (less(buf_seqno(iskb), link->failover_checkpt)) { | 
| 1882 | else if (msg_type(t_msg) == ORIGINAL_MSG) | 1721 | kfree_skb(iskb); | 
| 1883 | *buf = tipc_link_failover_rcv(l_ptr, t_buf); | 1722 | iskb = NULL; | 
| 1884 | else | 1723 | goto exit; | 
| 1885 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 1724 | } | 
| 1725 | if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) { | ||
| 1726 | link->stats.recv_fragments++; | ||
| 1727 | tipc_buf_append(&link->failover_skb, &iskb); | ||
| 1728 | } | ||
| 1886 | exit: | 1729 | exit: | 
| 1887 | kfree_skb(t_buf); | 1730 | if (!link->failover_pkts && pl) | 
| 1888 | return *buf != NULL; | 1731 | pl->flags &= ~LINK_FAILINGOVER; | 
| 1732 | kfree_skb(*skb); | ||
| 1733 | *skb = iskb; | ||
| 1734 | return *skb; | ||
| 1889 | } | 1735 | } | 
| 1890 | 1736 | ||
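The failover receive path above reduces to a simple countdown: the first FAILOVER_MSG carries the number of tunnelled originals in msg_msgcnt(), each tunnelled packet that yields an inner message decrements it, and the parallel link's LINK_FAILINGOVER flag is cleared once the count reaches zero. A standalone C sketch of that bookkeeping; the struct, the fo_rcv() helper and the FIRST_FAILOVER value are illustrative stand-ins, not the kernel definitions:

    #include <stdio.h>

    #define LINK_FAILINGOVER 0x0008
    #define FIRST_FAILOVER   0xffffu  /* illustrative sentinel, not the kernel value */

    struct toy_link {
        unsigned flags;
        unsigned failover_pkts;       /* tunnelled originals still expected */
    };

    /* One tunnelled FAILOVER_MSG arrives; msgcnt is meaningful only on the first. */
    static void fo_rcv(struct toy_link *l, struct toy_link *peer, unsigned msgcnt)
    {
        if (l->failover_pkts == FIRST_FAILOVER)
            l->failover_pkts = msgcnt;          /* learn how many originals to expect */
        if (l->failover_pkts)
            l->failover_pkts--;                 /* one inner packet consumed */
        if (!l->failover_pkts && peer)
            peer->flags &= ~LINK_FAILINGOVER;   /* failover complete on parallel link */
    }

    int main(void)
    {
        struct toy_link l = { .failover_pkts = FIRST_FAILOVER };
        struct toy_link peer = { .flags = LINK_FAILINGOVER };

        for (int i = 0; i < 3; i++)
            fo_rcv(&l, &peer, 3);               /* peer announced three originals */
        printf("peer still failing over: %s\n",
               peer.flags & LINK_FAILINGOVER ? "yes" : "no");
        return 0;
    }

The real function additionally drops inner packets whose sequence number predates failover_checkpt and feeds fragmented inner messages to the reassembly buffer; the sketch only shows the counting.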
| 1891 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | 1737 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | 
| @@ -1900,23 +1746,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | |||
| 1900 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); | 1746 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); | 
| 1901 | } | 1747 | } | 
| 1902 | 1748 | ||
| 1903 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) | 1749 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) | 
| 1904 | { | 1750 | { | 
| 1905 | /* Data messages from this node, inclusive FIRST_FRAGM */ | 1751 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); | 
| 1906 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; | 1752 | |
| 1907 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; | 1753 | l->window = win; | 
| 1908 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; | 1754 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; | 
| 1909 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; | 1755 | l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; | 
| 1910 | /* Transiting data messages, inclusive FIRST_FRAGM | 1756 | l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; | 
| 1911 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; | 1757 | l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; | 
| 1912 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; | 1758 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; | 
| 1913 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; | ||
| 1914 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; | ||
| 1915 | l_ptr->queue_limit[CONN_MANAGER] = 1200; | ||
| 1916 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; | ||
| 1917 | l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; | ||
| 1918 | /* FRAGMENT and LAST_FRAGMENT packets */ | ||
| 1919 | l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; | ||
| 1920 | } | 1759 | } | 
| 1921 | 1760 | ||
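The rewritten tipc_link_set_queue_limits() above derives one backlog limit per importance level from the link window, replacing the fifteen-entry queue_limit[] table. A runnable sketch of the same arithmetic; max_bulk stands in for TIPC_MAX_PUBLICATIONS / (mtu / ITEM_SIZE), whose constants are not part of this hunk:

    #include <stdio.h>

    enum { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM, LEVELS };

    /* Mirror of the per-importance limits derived from the window 'win'.
     * 'max_bulk' stands in for TIPC_MAX_PUBLICATIONS / (mtu / ITEM_SIZE). */
    static void set_limits(unsigned limit[LEVELS], unsigned win, unsigned max_bulk)
    {
        limit[LOW]      = win / 2;
        limit[MEDIUM]   = win;
        limit[HIGH]     = win / 2 * 3;   /* integer division first, as in the patch */
        limit[CRITICAL] = win * 2;
        limit[SYSTEM]   = max_bulk;
    }

    int main(void)
    {
        unsigned limit[LEVELS];

        set_limits(limit, 50, 1000);     /* 50-packet window, illustrative bulk cap */
        for (int i = 0; i < LEVELS; i++)
            printf("importance %d: backlog limit %u\n", i, limit[i]);
        return 0;
    }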
| 1922 | /* tipc_link_find_owner - locate owner node of link by link's name | 1761 | /* tipc_link_find_owner - locate owner node of link by link's name | 
| @@ -2081,14 +1920,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
| 2081 | 1920 | ||
| 2082 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1921 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 
| 2083 | link_set_supervision_props(link, tol); | 1922 | link_set_supervision_props(link, tol); | 
| 2084 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); | 1923 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); | 
| 2085 | } | 1924 | } | 
| 2086 | if (props[TIPC_NLA_PROP_PRIO]) { | 1925 | if (props[TIPC_NLA_PROP_PRIO]) { | 
| 2087 | u32 prio; | 1926 | u32 prio; | 
| 2088 | 1927 | ||
| 2089 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 1928 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 
| 2090 | link->priority = prio; | 1929 | link->priority = prio; | 
| 2091 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); | 1930 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); | 
| 2092 | } | 1931 | } | 
| 2093 | if (props[TIPC_NLA_PROP_WIN]) { | 1932 | if (props[TIPC_NLA_PROP_WIN]) { | 
| 2094 | u32 win; | 1933 | u32 win; | 
| @@ -2171,7 +2010,7 @@ msg_full: | |||
| 2171 | 2010 | ||
| 2172 | /* Caller should hold appropriate locks to protect the link */ | 2011 | /* Caller should hold appropriate locks to protect the link */ | 
| 2173 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | 2012 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | 
| 2174 | struct tipc_link *link) | 2013 | struct tipc_link *link, int nlflags) | 
| 2175 | { | 2014 | { | 
| 2176 | int err; | 2015 | int err; | 
| 2177 | void *hdr; | 2016 | void *hdr; | 
| @@ -2180,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
| 2180 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 2019 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 2181 | 2020 | ||
| 2182 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 2021 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 
| 2183 | NLM_F_MULTI, TIPC_NL_LINK_GET); | 2022 | nlflags, TIPC_NL_LINK_GET); | 
| 2184 | if (!hdr) | 2023 | if (!hdr) | 
| 2185 | return -EMSGSIZE; | 2024 | return -EMSGSIZE; | 
| 2186 | 2025 | ||
| @@ -2193,7 +2032,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
| 2193 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 2032 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 
| 2194 | tipc_cluster_mask(tn->own_addr))) | 2033 | tipc_cluster_mask(tn->own_addr))) | 
| 2195 | goto attr_msg_full; | 2034 | goto attr_msg_full; | 
| 2196 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) | 2035 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) | 
| 2197 | goto attr_msg_full; | 2036 | goto attr_msg_full; | 
| 2198 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 2037 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 
| 2199 | goto attr_msg_full; | 2038 | goto attr_msg_full; | 
| @@ -2215,7 +2054,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
| 2215 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | 2054 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | 
| 2216 | goto prop_msg_full; | 2055 | goto prop_msg_full; | 
| 2217 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | 2056 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | 
| 2218 | link->queue_limit[TIPC_LOW_IMPORTANCE])) | 2057 | link->window)) | 
| 2219 | goto prop_msg_full; | 2058 | goto prop_msg_full; | 
| 2220 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | 2059 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | 
| 2221 | goto prop_msg_full; | 2060 | goto prop_msg_full; | 
| @@ -2253,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, | |||
| 2253 | if (!node->links[i]) | 2092 | if (!node->links[i]) | 
| 2254 | continue; | 2093 | continue; | 
| 2255 | 2094 | ||
| 2256 | err = __tipc_nl_add_link(net, msg, node->links[i]); | 2095 | err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI); | 
| 2257 | if (err) | 2096 | if (err) | 
| 2258 | return err; | 2097 | return err; | 
| 2259 | } | 2098 | } | 
| @@ -2281,7 +2120,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2281 | msg.seq = cb->nlh->nlmsg_seq; | 2120 | msg.seq = cb->nlh->nlmsg_seq; | 
| 2282 | 2121 | ||
| 2283 | rcu_read_lock(); | 2122 | rcu_read_lock(); | 
| 2284 | |||
| 2285 | if (prev_node) { | 2123 | if (prev_node) { | 
| 2286 | node = tipc_node_find(net, prev_node); | 2124 | node = tipc_node_find(net, prev_node); | 
| 2287 | if (!node) { | 2125 | if (!node) { | 
| @@ -2294,6 +2132,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2294 | cb->prev_seq = 1; | 2132 | cb->prev_seq = 1; | 
| 2295 | goto out; | 2133 | goto out; | 
| 2296 | } | 2134 | } | 
| 2135 | tipc_node_put(node); | ||
| 2297 | 2136 | ||
| 2298 | list_for_each_entry_continue_rcu(node, &tn->node_list, | 2137 | list_for_each_entry_continue_rcu(node, &tn->node_list, | 
| 2299 | list) { | 2138 | list) { | 
| @@ -2367,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | |||
| 2367 | goto err_out; | 2206 | goto err_out; | 
| 2368 | } | 2207 | } | 
| 2369 | 2208 | ||
| 2370 | err = __tipc_nl_add_link(net, &msg, link); | 2209 | err = __tipc_nl_add_link(net, &msg, link, 0); | 
| 2371 | if (err) | 2210 | if (err) | 
| 2372 | goto err_out; | 2211 | goto err_out; | 
| 2373 | 2212 | ||
| diff --git a/net/tipc/link.h b/net/tipc/link.h index 7aeb52092bf3..b5b4e3554d4e 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
| @@ -58,8 +58,10 @@ | |||
| 58 | 58 | ||
| 59 | /* Link endpoint execution states | 59 | /* Link endpoint execution states | 
| 60 | */ | 60 | */ | 
| 61 | #define LINK_STARTED 0x0001 | 61 | #define LINK_STARTED 0x0001 | 
| 62 | #define LINK_STOPPED 0x0002 | 62 | #define LINK_STOPPED 0x0002 | 
| 63 | #define LINK_SYNCHING 0x0004 | ||
| 64 | #define LINK_FAILINGOVER 0x0008 | ||
| 63 | 65 | ||
| 64 | /* Starting value for maximum packet size negotiation on unicast links | 66 | /* Starting value for maximum packet size negotiation on unicast links | 
| 65 | * (unless bearer MTU is less) | 67 | * (unless bearer MTU is less) | 
| @@ -118,13 +120,13 @@ struct tipc_stats { | |||
| 118 | * @pmsg: convenience pointer to "proto_msg" field | 120 | * @pmsg: convenience pointer to "proto_msg" field | 
| 119 | * @priority: current link priority | 121 | * @priority: current link priority | 
| 120 | * @net_plane: current link network plane ('A' through 'H') | 122 | * @net_plane: current link network plane ('A' through 'H') | 
| 121 | * @queue_limit: outbound message queue congestion thresholds (indexed by user) | 123 | * @backlog_limit: backlog queue congestion thresholds (indexed by importance) | 
| 122 | * @exp_msg_count: # of tunnelled messages expected during link changeover | 124 | * @exp_msg_count: # of tunnelled messages expected during link changeover | 
| 123 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset | 125 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset | 
| 124 | * @max_pkt: current maximum packet size for this link | 126 | * @mtu: current maximum packet size for this link | 
| 125 | * @max_pkt_target: desired maximum packet size for this link | 127 | * @advertised_mtu: advertised own mtu when link is being established | 
| 126 | * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) | 128 | * @transmitq: queue for sent, non-acked messages | 
| 127 | * @outqueue: outbound message queue | 129 | * @backlogq: queue for messages waiting to be sent | 
| 128 | * @next_out_no: next sequence number to use for outbound messages | 130 | * @next_out_no: next sequence number to use for outbound messages | 
| 129 | * @last_retransmitted: sequence number of most recently retransmitted message | 131 | * @last_retransmitted: sequence number of most recently retransmitted message | 
| 130 | * @stale_count: # of identical retransmit requests made by peer | 132 | * @stale_count: # of identical retransmit requests made by peer | 
| @@ -165,36 +167,40 @@ struct tipc_link { | |||
| 165 | struct tipc_msg *pmsg; | 167 | struct tipc_msg *pmsg; | 
| 166 | u32 priority; | 168 | u32 priority; | 
| 167 | char net_plane; | 169 | char net_plane; | 
| 168 | u32 queue_limit[15]; /* queue_limit[0]==window limit */ | 170 | u16 synch_point; | 
| 169 | 171 | ||
| 170 | /* Changeover */ | 172 | /* Failover */ | 
| 171 | u32 exp_msg_count; | 173 | u16 failover_pkts; | 
| 172 | u32 reset_checkpoint; | 174 | u16 failover_checkpt; | 
| 175 | struct sk_buff *failover_skb; | ||
| 173 | 176 | ||
| 174 | /* Max packet negotiation */ | 177 | /* Max packet negotiation */ | 
| 175 | u32 max_pkt; | 178 | u16 mtu; | 
| 176 | u32 max_pkt_target; | 179 | u16 advertised_mtu; | 
| 177 | u32 max_pkt_probes; | ||
| 178 | 180 | ||
| 179 | /* Sending */ | 181 | /* Sending */ | 
| 180 | struct sk_buff_head outqueue; | 182 | struct sk_buff_head transmq; | 
| 183 | struct sk_buff_head backlogq; | ||
| 184 | struct { | ||
| 185 | u16 len; | ||
| 186 | u16 limit; | ||
| 187 | } backlog[5]; | ||
| 181 | u32 next_out_no; | 188 | u32 next_out_no; | 
| 189 | u32 window; | ||
| 182 | u32 last_retransmitted; | 190 | u32 last_retransmitted; | 
| 183 | u32 stale_count; | 191 | u32 stale_count; | 
| 184 | 192 | ||
| 185 | /* Reception */ | 193 | /* Reception */ | 
| 186 | u32 next_in_no; | 194 | u32 next_in_no; | 
| 187 | struct sk_buff_head deferred_queue; | 195 | u32 rcv_unacked; | 
| 188 | u32 unacked_window; | 196 | struct sk_buff_head deferdq; | 
| 189 | struct sk_buff_head inputq; | 197 | struct sk_buff_head inputq; | 
| 190 | struct sk_buff_head namedq; | 198 | struct sk_buff_head namedq; | 
| 191 | 199 | ||
| 192 | /* Congestion handling */ | 200 | /* Congestion handling */ | 
| 193 | struct sk_buff *next_out; | ||
| 194 | struct sk_buff_head wakeupq; | 201 | struct sk_buff_head wakeupq; | 
| 195 | 202 | ||
| 196 | /* Fragmentation/reassembly */ | 203 | /* Fragmentation/reassembly */ | 
| 197 | u32 long_msg_seq_no; | ||
| 198 | struct sk_buff *reasm_buf; | 204 | struct sk_buff *reasm_buf; | 
| 199 | 205 | ||
| 200 | /* Statistics */ | 206 | /* Statistics */ | 
| @@ -225,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest, | |||
| 225 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, | 231 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, | 
| 226 | struct sk_buff_head *list); | 232 | struct sk_buff_head *list); | 
| 227 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, | 233 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, | 
| 228 | u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); | 234 | u32 gap, u32 tolerance, u32 priority); | 
| 229 | void tipc_link_push_packets(struct tipc_link *l_ptr); | 235 | void tipc_link_push_packets(struct tipc_link *l_ptr); | 
| 230 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); | 236 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); | 
| 231 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); | 237 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); | 
| @@ -302,9 +308,4 @@ static inline int link_reset_reset(struct tipc_link *l_ptr) | |||
| 302 | return l_ptr->state == RESET_RESET; | 308 | return l_ptr->state == RESET_RESET; | 
| 303 | } | 309 | } | 
| 304 | 310 | ||
| 305 | static inline int link_congested(struct tipc_link *l_ptr) | ||
| 306 | { | ||
| 307 | return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0]; | ||
| 308 | } | ||
| 309 | |||
| 310 | #endif | 311 | #endif | 
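With the backlog[] array in struct tipc_link, the removed link_congested() test (one global queue length against queue_limit[0]) becomes a per-importance comparison of len against limit. A small illustrative sketch of that check; the struct below only mirrors the len/limit pair and is not the kernel structure:

    #include <stdbool.h>
    #include <stdio.h>

    struct backlog_entry {
        unsigned short len;      /* messages currently queued at this level */
        unsigned short limit;    /* congestion threshold for this level */
    };

    /* Per-importance congestion test replacing the single-queue link_congested(). */
    static bool level_congested(const struct backlog_entry *backlog, int imp)
    {
        return backlog[imp].len >= backlog[imp].limit;
    }

    int main(void)
    {
        struct backlog_entry backlog[5] = {
            { .len = 10, .limit = 25 },   /* low */
            { .len = 60, .limit = 50 },   /* medium: over its limit */
            { .len =  5, .limit = 75 },   /* high */
            { .len =  0, .limit = 100 },  /* critical */
            { .len =  0, .limit = 300 },  /* system */
        };

        for (int imp = 0; imp < 5; imp++)
            printf("importance %d congested: %d\n", imp, level_congested(backlog, imp));
        return 0;
    }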
| diff --git a/net/tipc/msg.c b/net/tipc/msg.c index b6eb90cd3ef7..c3e96e815418 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* | 
| 2 | * net/tipc/msg.c: TIPC message header routines | 2 | * net/tipc/msg.c: TIPC message header routines | 
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB | 
| 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 
| 6 | * All rights reserved. | 6 | * All rights reserved. | 
| 7 | * | 7 | * | 
| @@ -165,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | |||
| 165 | } | 165 | } | 
| 166 | 166 | ||
| 167 | if (fragid == LAST_FRAGMENT) { | 167 | if (fragid == LAST_FRAGMENT) { | 
| 168 | TIPC_SKB_CB(head)->validated = false; | ||
| 169 | if (unlikely(!tipc_msg_validate(head))) | ||
| 170 | goto err; | ||
| 168 | *buf = head; | 171 | *buf = head; | 
| 169 | TIPC_SKB_CB(head)->tail = NULL; | 172 | TIPC_SKB_CB(head)->tail = NULL; | 
| 170 | *headbuf = NULL; | 173 | *headbuf = NULL; | 
| @@ -172,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | |||
| 172 | } | 175 | } | 
| 173 | *buf = NULL; | 176 | *buf = NULL; | 
| 174 | return 0; | 177 | return 0; | 
| 175 | |||
| 176 | err: | 178 | err: | 
| 177 | pr_warn_ratelimited("Unable to build fragment list\n"); | 179 | pr_warn_ratelimited("Unable to build fragment list\n"); | 
| 178 | kfree_skb(*buf); | 180 | kfree_skb(*buf); | 
| @@ -181,6 +183,48 @@ err: | |||
| 181 | return 0; | 183 | return 0; | 
| 182 | } | 184 | } | 
| 183 | 185 | ||
| 186 | /* tipc_msg_validate - validate basic format of received message | ||
| 187 | * | ||
| 188 | * This routine ensures a TIPC message has an acceptable header, and at least | ||
| 189 | * as much data as the header indicates it should. The routine also ensures | ||
| 190 | * that the entire message header is stored in the main fragment of the message | ||
| 191 | * buffer, to simplify future access to message header fields. | ||
| 192 | * | ||
| 193 | * Note: Having extra info present in the message header or data areas is OK. | ||
| 194 | * TIPC will ignore the excess, under the assumption that it is optional info | ||
| 195 | * introduced by a later release of the protocol. | ||
| 196 | */ | ||
| 197 | bool tipc_msg_validate(struct sk_buff *skb) | ||
| 198 | { | ||
| 199 | struct tipc_msg *msg; | ||
| 200 | int msz, hsz; | ||
| 201 | |||
| 202 | if (unlikely(TIPC_SKB_CB(skb)->validated)) | ||
| 203 | return true; | ||
| 204 | if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) | ||
| 205 | return false; | ||
| 206 | |||
| 207 | hsz = msg_hdr_sz(buf_msg(skb)); | ||
| 208 | if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE)) | ||
| 209 | return false; | ||
| 210 | if (unlikely(!pskb_may_pull(skb, hsz))) | ||
| 211 | return false; | ||
| 212 | |||
| 213 | msg = buf_msg(skb); | ||
| 214 | if (unlikely(msg_version(msg) != TIPC_VERSION)) | ||
| 215 | return false; | ||
| 216 | |||
| 217 | msz = msg_size(msg); | ||
| 218 | if (unlikely(msz < hsz)) | ||
| 219 | return false; | ||
| 220 | if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE)) | ||
| 221 | return false; | ||
| 222 | if (unlikely(skb->len < msz)) | ||
| 223 | return false; | ||
| 224 | |||
| 225 | TIPC_SKB_CB(skb)->validated = true; | ||
| 226 | return true; | ||
| 227 | } | ||
| 184 | 228 | ||
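tipc_msg_validate() applies its checks in strict order: enough data for a minimal header, a declared header size within bounds and fully present, a matching protocol version, and a declared message size that covers the header without exceeding either the payload cap or the buffer. A userspace sketch of the same ordering on already-decoded fields; the constants are filled in for illustration, and the kernel reads the fields via msg_hdr_sz()/msg_size() and pskb_may_pull():

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_HDR      24       /* shortest TIPC header, illustrative */
    #define MAX_HDR      60       /* longest TIPC header, illustrative */
    #define PROTO_VER    2
    #define MAX_USER_MSG 66000u   /* illustrative payload cap */

    /* Same ordering of checks as tipc_msg_validate(), on decoded word-0 fields:
     * buf_len - bytes actually present, hsz - declared header size,
     * msz - declared total size, version - declared protocol version. */
    static bool validate(unsigned buf_len, unsigned hsz, unsigned msz, unsigned version)
    {
        if (buf_len < MIN_HDR)
            return false;                 /* cannot even hold a short header */
        if (hsz < MIN_HDR || hsz > MAX_HDR)
            return false;                 /* implausible header size */
        if (buf_len < hsz)
            return false;                 /* header not fully present */
        if (version != PROTO_VER)
            return false;
        if (msz < hsz)
            return false;                 /* declared size smaller than header */
        if (msz - hsz > MAX_USER_MSG)
            return false;                 /* oversized payload */
        if (buf_len < msz)
            return false;                 /* truncated message */
        return true;
    }

    int main(void)
    {
        printf("ok:  %d\n", validate(100, 24, 100, 2));
        printf("bad: %d\n", validate(100, 24, 120, 2));   /* declared size > buffer */
        return 0;
    }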
| 185 | /** | 229 | /** | 
| 186 | * tipc_msg_build - create buffer chain containing specified header and data | 230 | * tipc_msg_build - create buffer chain containing specified header and data | 
| @@ -228,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, | |||
| 228 | FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); | 272 | FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); | 
| 229 | msg_set_size(&pkthdr, pktmax); | 273 | msg_set_size(&pkthdr, pktmax); | 
| 230 | msg_set_fragm_no(&pkthdr, pktno); | 274 | msg_set_fragm_no(&pkthdr, pktno); | 
| 275 | msg_set_importance(&pkthdr, msg_importance(mhdr)); | ||
| 231 | 276 | ||
| 232 | /* Prepare first fragment */ | 277 | /* Prepare first fragment */ | 
| 233 | skb = tipc_buf_acquire(pktmax); | 278 | skb = tipc_buf_acquire(pktmax); | 
| @@ -286,33 +331,36 @@ error: | |||
| 286 | 331 | ||
| 287 | /** | 332 | /** | 
| 288 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one | 333 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one | 
| 289 | * @list: the buffer chain of the existing buffer ("bundle") | 334 | * @bskb: the buffer to append to ("bundle") | 
| 290 | * @skb: buffer to be appended | 335 | * @skb: buffer to be appended | 
| 291 | * @mtu: max allowable size for the bundle buffer | 336 | * @mtu: max allowable size for the bundle buffer | 
| 292 | * Consumes buffer if successful | 337 | * Consumes buffer if successful | 
| 293 | * Returns true if bundling could be performed, otherwise false | 338 | * Returns true if bundling could be performed, otherwise false | 
| 294 | */ | 339 | */ | 
| 295 | bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) | 340 | bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu) | 
| 296 | { | 341 | { | 
| 297 | struct sk_buff *bskb = skb_peek_tail(list); | 342 | struct tipc_msg *bmsg; | 
| 298 | struct tipc_msg *bmsg = buf_msg(bskb); | ||
| 299 | struct tipc_msg *msg = buf_msg(skb); | 343 | struct tipc_msg *msg = buf_msg(skb); | 
| 300 | unsigned int bsz = msg_size(bmsg); | 344 | unsigned int bsz; | 
| 301 | unsigned int msz = msg_size(msg); | 345 | unsigned int msz = msg_size(msg); | 
| 302 | u32 start = align(bsz); | 346 | u32 start, pad; | 
| 303 | u32 max = mtu - INT_H_SIZE; | 347 | u32 max = mtu - INT_H_SIZE; | 
| 304 | u32 pad = start - bsz; | ||
| 305 | 348 | ||
| 306 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) | 349 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) | 
| 307 | return false; | 350 | return false; | 
| 308 | if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) | 351 | if (!bskb) | 
| 352 | return false; | ||
| 353 | bmsg = buf_msg(bskb); | ||
| 354 | bsz = msg_size(bmsg); | ||
| 355 | start = align(bsz); | ||
| 356 | pad = start - bsz; | ||
| 357 | |||
| 358 | if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL)) | ||
| 309 | return false; | 359 | return false; | 
| 310 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) | 360 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) | 
| 311 | return false; | 361 | return false; | 
| 312 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) | 362 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) | 
| 313 | return false; | 363 | return false; | 
| 314 | if (likely(!TIPC_SKB_CB(bskb)->bundling)) | ||
| 315 | return false; | ||
| 316 | if (unlikely(skb_tailroom(bskb) < (pad + msz))) | 364 | if (unlikely(skb_tailroom(bskb) < (pad + msz))) | 
| 317 | return false; | 365 | return false; | 
| 318 | if (unlikely(max < (start + msz))) | 366 | if (unlikely(max < (start + msz))) | 
| @@ -328,34 +376,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) | |||
| 328 | 376 | ||
| 329 | /** | 377 | /** | 
| 330 | * tipc_msg_extract(): extract bundled inner packet from buffer | 378 | * tipc_msg_extract(): extract bundled inner packet from buffer | 
| 331 | * @skb: linear outer buffer, to be extracted from. | 379 | * @skb: buffer to be extracted from. | 
| 332 | * @iskb: extracted inner buffer, to be returned | 380 | * @iskb: extracted inner buffer, to be returned | 
| 333 | * @pos: position of msg to be extracted. Returns with pointer of next msg | 381 | * @pos: position in outer message of msg to be extracted. | 
| 382 | * Returns position of next msg | ||
| 334 | * Consumes outer buffer when last packet extracted | 383 | * Consumes outer buffer when last packet extracted | 
| 335 | * Returns true when when there is an extracted buffer, otherwise false | 384 | * Returns true when when there is an extracted buffer, otherwise false | 
| 336 | */ | 385 | */ | 
| 337 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) | 386 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) | 
| 338 | { | 387 | { | 
| 339 | struct tipc_msg *msg = buf_msg(skb); | 388 | struct tipc_msg *msg; | 
| 340 | int imsz; | 389 | int imsz, offset; | 
| 341 | struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos); | ||
| 342 | 390 | ||
| 343 | /* Is there space left for shortest possible message? */ | 391 | *iskb = NULL; | 
| 344 | if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE)) | 392 | if (unlikely(skb_linearize(skb))) | 
| 393 | goto none; | ||
| 394 | |||
| 395 | msg = buf_msg(skb); | ||
| 396 | offset = msg_hdr_sz(msg) + *pos; | ||
| 397 | if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) | ||
| 345 | goto none; | 398 | goto none; | 
| 346 | imsz = msg_size(imsg); | ||
| 347 | 399 | ||
| 348 | /* Is there space left for current message ? */ | 400 | *iskb = skb_clone(skb, GFP_ATOMIC); | 
| 349 | if ((*pos + imsz) > msg_data_sz(msg)) | 401 | if (unlikely(!*iskb)) | 
| 350 | goto none; | 402 | goto none; | 
| 351 | *iskb = tipc_buf_acquire(imsz); | 403 | skb_pull(*iskb, offset); | 
| 352 | if (!*iskb) | 404 | imsz = msg_size(buf_msg(*iskb)); | 
| 405 | skb_trim(*iskb, imsz); | ||
| 406 | if (unlikely(!tipc_msg_validate(*iskb))) | ||
| 353 | goto none; | 407 | goto none; | 
| 354 | skb_copy_to_linear_data(*iskb, imsg, imsz); | ||
| 355 | *pos += align(imsz); | 408 | *pos += align(imsz); | 
| 356 | return true; | 409 | return true; | 
| 357 | none: | 410 | none: | 
| 358 | kfree_skb(skb); | 411 | kfree_skb(skb); | 
| 412 | kfree_skb(*iskb); | ||
| 359 | *iskb = NULL; | 413 | *iskb = NULL; | 
| 360 | return false; | 414 | return false; | 
| 361 | } | 415 | } | 
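The reworked tipc_msg_extract() clones the outer buffer and walks it by offset instead of copying each inner message into a fresh buffer: the inner message starts at msg_hdr_sz(outer) + *pos, its own size field gives its length, and *pos advances by the 4-byte-aligned size. A toy sketch of that cursor arithmetic over a flat array, with a plain 4-byte length prefix standing in for the real inner TIPC header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned align4(unsigned sz)
    {
        return (sz + 3) & ~3u;            /* same rounding as align() */
    }

    /* Walk inner records of a "bundle": each record starts with a 4-byte length
     * (toy stand-in for the inner TIPC header's size field) followed by payload. */
    static void walk_bundle(const unsigned char *data, unsigned hdr_sz, unsigned total)
    {
        unsigned pos = 0;

        while (hdr_sz + pos + sizeof(uint32_t) <= total) {
            unsigned offset = hdr_sz + pos;
            uint32_t imsz;

            memcpy(&imsz, data + offset, sizeof(imsz));  /* inner message size */
            if (imsz < sizeof(imsz) || offset + imsz > total)
                break;                    /* malformed or truncated record */
            printf("inner msg at offset %u, %u bytes\n", offset, imsz);
            pos += align4(imsz);          /* advance cursor, keep word alignment */
        }
    }

    int main(void)
    {
        unsigned char buf[64] = { 0 };
        uint32_t lens[] = { 10, 7 };
        unsigned hdr_sz = 8, pos = 0;

        for (int i = 0; i < 2; i++) {     /* build two toy inner records */
            memcpy(buf + hdr_sz + pos, &lens[i], sizeof(lens[i]));
            pos += align4(lens[i]);
        }
        walk_bundle(buf, hdr_sz, hdr_sz + pos);
        return 0;
    }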
| @@ -369,18 +423,17 @@ none: | |||
| 369 | * Replaces buffer if successful | 423 | * Replaces buffer if successful | 
| 370 | * Returns true if success, otherwise false | 424 | * Returns true if success, otherwise false | 
| 371 | */ | 425 | */ | 
| 372 | bool tipc_msg_make_bundle(struct sk_buff_head *list, | 426 | bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode) | 
| 373 | struct sk_buff *skb, u32 mtu, u32 dnode) | ||
| 374 | { | 427 | { | 
| 375 | struct sk_buff *bskb; | 428 | struct sk_buff *bskb; | 
| 376 | struct tipc_msg *bmsg; | 429 | struct tipc_msg *bmsg; | 
| 377 | struct tipc_msg *msg = buf_msg(skb); | 430 | struct tipc_msg *msg = buf_msg(*skb); | 
| 378 | u32 msz = msg_size(msg); | 431 | u32 msz = msg_size(msg); | 
| 379 | u32 max = mtu - INT_H_SIZE; | 432 | u32 max = mtu - INT_H_SIZE; | 
| 380 | 433 | ||
| 381 | if (msg_user(msg) == MSG_FRAGMENTER) | 434 | if (msg_user(msg) == MSG_FRAGMENTER) | 
| 382 | return false; | 435 | return false; | 
| 383 | if (msg_user(msg) == CHANGEOVER_PROTOCOL) | 436 | if (msg_user(msg) == TUNNEL_PROTOCOL) | 
| 384 | return false; | 437 | return false; | 
| 385 | if (msg_user(msg) == BCAST_PROTOCOL) | 438 | if (msg_user(msg) == BCAST_PROTOCOL) | 
| 386 | return false; | 439 | return false; | 
| @@ -398,9 +451,9 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, | |||
| 398 | msg_set_seqno(bmsg, msg_seqno(msg)); | 451 | msg_set_seqno(bmsg, msg_seqno(msg)); | 
| 399 | msg_set_ack(bmsg, msg_ack(msg)); | 452 | msg_set_ack(bmsg, msg_ack(msg)); | 
| 400 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); | 453 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); | 
| 401 | TIPC_SKB_CB(bskb)->bundling = true; | 454 | tipc_msg_bundle(bskb, *skb, mtu); | 
| 402 | __skb_queue_tail(list, bskb); | 455 | *skb = bskb; | 
| 403 | return tipc_msg_bundle(list, skb, mtu); | 456 | return true; | 
| 404 | } | 457 | } | 
| 405 | 458 | ||
| 406 | /** | 459 | /** | 
| @@ -415,21 +468,17 @@ bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, | |||
| 415 | int err) | 468 | int err) | 
| 416 | { | 469 | { | 
| 417 | struct tipc_msg *msg = buf_msg(buf); | 470 | struct tipc_msg *msg = buf_msg(buf); | 
| 418 | uint imp = msg_importance(msg); | ||
| 419 | struct tipc_msg ohdr; | 471 | struct tipc_msg ohdr; | 
| 420 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); | 472 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); | 
| 421 | 473 | ||
| 422 | if (skb_linearize(buf)) | 474 | if (skb_linearize(buf)) | 
| 423 | goto exit; | 475 | goto exit; | 
| 476 | msg = buf_msg(buf); | ||
| 424 | if (msg_dest_droppable(msg)) | 477 | if (msg_dest_droppable(msg)) | 
| 425 | goto exit; | 478 | goto exit; | 
| 426 | if (msg_errcode(msg)) | 479 | if (msg_errcode(msg)) | 
| 427 | goto exit; | 480 | goto exit; | 
| 428 | |||
| 429 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); | 481 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); | 
| 430 | imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE); | ||
| 431 | if (msg_isdata(msg)) | ||
| 432 | msg_set_importance(msg, imp); | ||
| 433 | msg_set_errcode(msg, err); | 482 | msg_set_errcode(msg, err); | 
| 434 | msg_set_origport(msg, msg_destport(&ohdr)); | 483 | msg_set_origport(msg, msg_destport(&ohdr)); | 
| 435 | msg_set_destport(msg, msg_origport(&ohdr)); | 484 | msg_set_destport(msg, msg_origport(&ohdr)); | 
| @@ -462,15 +511,18 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, | |||
| 462 | { | 511 | { | 
| 463 | struct tipc_msg *msg = buf_msg(skb); | 512 | struct tipc_msg *msg = buf_msg(skb); | 
| 464 | u32 dport; | 513 | u32 dport; | 
| 514 | u32 own_addr = tipc_own_addr(net); | ||
| 465 | 515 | ||
| 466 | if (!msg_isdata(msg)) | 516 | if (!msg_isdata(msg)) | 
| 467 | return false; | 517 | return false; | 
| 468 | if (!msg_named(msg)) | 518 | if (!msg_named(msg)) | 
| 469 | return false; | 519 | return false; | 
| 520 | if (msg_errcode(msg)) | ||
| 521 | return false; | ||
| 470 | *err = -TIPC_ERR_NO_NAME; | 522 | *err = -TIPC_ERR_NO_NAME; | 
| 471 | if (skb_linearize(skb)) | 523 | if (skb_linearize(skb)) | 
| 472 | return false; | 524 | return false; | 
| 473 | if (msg_reroute_cnt(msg) > 0) | 525 | if (msg_reroute_cnt(msg)) | 
| 474 | return false; | 526 | return false; | 
| 475 | *dnode = addr_domain(net, msg_lookup_scope(msg)); | 527 | *dnode = addr_domain(net, msg_lookup_scope(msg)); | 
| 476 | dport = tipc_nametbl_translate(net, msg_nametype(msg), | 528 | dport = tipc_nametbl_translate(net, msg_nametype(msg), | 
| @@ -478,6 +530,8 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, | |||
| 478 | if (!dport) | 530 | if (!dport) | 
| 479 | return false; | 531 | return false; | 
| 480 | msg_incr_reroute_cnt(msg); | 532 | msg_incr_reroute_cnt(msg); | 
| 533 | if (*dnode != own_addr) | ||
| 534 | msg_set_prevnode(msg, own_addr); | ||
| 481 | msg_set_destnode(msg, *dnode); | 535 | msg_set_destnode(msg, *dnode); | 
| 482 | msg_set_destport(msg, dport); | 536 | msg_set_destport(msg, dport); | 
| 483 | *err = TIPC_OK; | 537 | *err = TIPC_OK; | 
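tipc_msg_lookup_dest() retries the name-table lookup for named data messages whose destination port has moved: it now also rejects messages that carry an error code, and when the new destination is remote it records the forwarding node in the previous-node field. A toy sketch of that decision order; translate() is a stand-in for tipc_nametbl_translate() and the addresses are made up:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_msg {
        bool is_data, is_named;
        int  errcode, reroute_cnt;
        unsigned name;            /* published name to translate */
        unsigned destnode, destport, prevnode;
    };

    /* Toy "name table": name 42 lives on port 4711 of node 0x1001002. */
    static unsigned translate(unsigned name, unsigned *dnode)
    {
        if (name == 42) {
            *dnode = 0x1001002;
            return 4711;
        }
        return 0;
    }

    static bool lookup_dest(struct toy_msg *m, unsigned own_addr)
    {
        unsigned dnode = 0, dport;

        if (!m->is_data || !m->is_named || m->errcode)
            return false;             /* only clean, named data is rerouted */
        if (m->reroute_cnt)
            return false;             /* at most one secondary lookup per message */
        dport = translate(m->name, &dnode);
        if (!dport)
            return false;
        m->reroute_cnt++;
        if (dnode != own_addr)
            m->prevnode = own_addr;   /* record forwarding node for remote dest */
        m->destnode = dnode;
        m->destport = dport;
        return true;
    }

    int main(void)
    {
        struct toy_msg m = { .is_data = true, .is_named = true, .name = 42 };

        printf("rerouted: %d -> node %#x port %u\n",
               lookup_dest(&m, 0x1001001), m.destnode, m.destport);
        return 0;
    }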
| diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 9ace47f44a69..e1d3595e2ee9 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* | 
| 2 | * net/tipc/msg.h: Include file for TIPC message header routines | 2 | * net/tipc/msg.h: Include file for TIPC message header routines | 
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 2000-2007, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2007, 2014-2015 Ericsson AB | 
| 5 | * Copyright (c) 2005-2008, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005-2008, 2010-2011, Wind River Systems | 
| 6 | * All rights reserved. | 6 | * All rights reserved. | 
| 7 | * | 7 | * | 
| @@ -54,6 +54,8 @@ struct plist; | |||
| 54 | * - TIPC_HIGH_IMPORTANCE | 54 | * - TIPC_HIGH_IMPORTANCE | 
| 55 | * - TIPC_CRITICAL_IMPORTANCE | 55 | * - TIPC_CRITICAL_IMPORTANCE | 
| 56 | */ | 56 | */ | 
| 57 | #define TIPC_SYSTEM_IMPORTANCE 4 | ||
| 58 | |||
| 57 | 59 | ||
| 58 | /* | 60 | /* | 
| 59 | * Payload message types | 61 | * Payload message types | 
| @@ -64,6 +66,19 @@ struct plist; | |||
| 64 | #define TIPC_DIRECT_MSG 3 | 66 | #define TIPC_DIRECT_MSG 3 | 
| 65 | 67 | ||
| 66 | /* | 68 | /* | 
| 69 | * Internal message users | ||
| 70 | */ | ||
| 71 | #define BCAST_PROTOCOL 5 | ||
| 72 | #define MSG_BUNDLER 6 | ||
| 73 | #define LINK_PROTOCOL 7 | ||
| 74 | #define CONN_MANAGER 8 | ||
| 75 | #define TUNNEL_PROTOCOL 10 | ||
| 76 | #define NAME_DISTRIBUTOR 11 | ||
| 77 | #define MSG_FRAGMENTER 12 | ||
| 78 | #define LINK_CONFIG 13 | ||
| 79 | #define SOCK_WAKEUP 14 /* pseudo user */ | ||
| 80 | |||
| 81 | /* | ||
| 67 | * Message header sizes | 82 | * Message header sizes | 
| 68 | */ | 83 | */ | 
| 69 | #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ | 84 | #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ | 
| @@ -76,7 +91,7 @@ struct plist; | |||
| 76 | 91 | ||
| 77 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) | 92 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) | 
| 78 | 93 | ||
| 79 | #define TIPC_MEDIA_ADDR_OFFSET 5 | 94 | #define TIPC_MEDIA_INFO_OFFSET 5 | 
| 80 | 95 | ||
| 81 | /** | 96 | /** | 
| 82 | * TIPC message buffer code | 97 | * TIPC message buffer code | 
| @@ -87,12 +102,12 @@ struct plist; | |||
| 87 | * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields | 102 | * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields | 
| 88 | * are word aligned for quicker access | 103 | * are word aligned for quicker access | 
| 89 | */ | 104 | */ | 
| 90 | #define BUF_HEADROOM LL_MAX_HEADER | 105 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) | 
| 91 | 106 | ||
| 92 | struct tipc_skb_cb { | 107 | struct tipc_skb_cb { | 
| 93 | void *handle; | 108 | void *handle; | 
| 94 | struct sk_buff *tail; | 109 | struct sk_buff *tail; | 
| 95 | bool deferred; | 110 | bool validated; | 
| 96 | bool wakeup_pending; | 111 | bool wakeup_pending; | 
| 97 | bool bundling; | 112 | bool bundling; | 
| 98 | u16 chain_sz; | 113 | u16 chain_sz; | 
| @@ -170,16 +185,6 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n) | |||
| 170 | msg_set_bits(m, 0, 25, 0xf, n); | 185 | msg_set_bits(m, 0, 25, 0xf, n); | 
| 171 | } | 186 | } | 
| 172 | 187 | ||
| 173 | static inline u32 msg_importance(struct tipc_msg *m) | ||
| 174 | { | ||
| 175 | return msg_bits(m, 0, 25, 0xf); | ||
| 176 | } | ||
| 177 | |||
| 178 | static inline void msg_set_importance(struct tipc_msg *m, u32 i) | ||
| 179 | { | ||
| 180 | msg_set_user(m, i); | ||
| 181 | } | ||
| 182 | |||
| 183 | static inline u32 msg_hdr_sz(struct tipc_msg *m) | 188 | static inline u32 msg_hdr_sz(struct tipc_msg *m) | 
| 184 | { | 189 | { | 
| 185 | return msg_bits(m, 0, 21, 0xf) << 2; | 190 | return msg_bits(m, 0, 21, 0xf) << 2; | 
| @@ -235,6 +240,15 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz) | |||
| 235 | m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz); | 240 | m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz); | 
| 236 | } | 241 | } | 
| 237 | 242 | ||
| 243 | static inline unchar *msg_data(struct tipc_msg *m) | ||
| 244 | { | ||
| 245 | return ((unchar *)m) + msg_hdr_sz(m); | ||
| 246 | } | ||
| 247 | |||
| 248 | static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | ||
| 249 | { | ||
| 250 | return (struct tipc_msg *)msg_data(m); | ||
| 251 | } | ||
| 238 | 252 | ||
| 239 | /* | 253 | /* | 
| 240 | * Word 1 | 254 | * Word 1 | 
| @@ -336,6 +350,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) | |||
| 336 | /* | 350 | /* | 
| 337 | * Words 3-10 | 351 | * Words 3-10 | 
| 338 | */ | 352 | */ | 
| 353 | static inline u32 msg_importance(struct tipc_msg *m) | ||
| 354 | { | ||
| 355 | if (unlikely(msg_user(m) == MSG_FRAGMENTER)) | ||
| 356 | return msg_bits(m, 5, 13, 0x7); | ||
| 357 | if (likely(msg_isdata(m) && !msg_errcode(m))) | ||
| 358 | return msg_user(m); | ||
| 359 | return TIPC_SYSTEM_IMPORTANCE; | ||
| 360 | } | ||
| 361 | |||
| 362 | static inline void msg_set_importance(struct tipc_msg *m, u32 i) | ||
| 363 | { | ||
| 364 | if (unlikely(msg_user(m) == MSG_FRAGMENTER)) | ||
| 365 | msg_set_bits(m, 5, 13, 0x7, i); | ||
| 366 | else if (likely(i < TIPC_SYSTEM_IMPORTANCE)) | ||
| 367 | msg_set_user(m, i); | ||
| 368 | else | ||
| 369 | pr_warn("Trying to set illegal importance in message\n"); | ||
| 370 | } | ||
| 371 | |||
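With the accessors above, importance is no longer a separate header field for most traffic: data messages report their user field directly (values 0-3), fragments keep the original importance in a 3-bit field of word 5, and every other internal message counts as TIPC_SYSTEM_IMPORTANCE. A sketch of that decision with the msg_bits() bit twiddling reduced to plain struct fields:

    #include <stdio.h>

    #define SYSTEM_IMPORTANCE 4
    #define FRAGMENTER_USER   12

    struct toy_msg {
        unsigned user;        /* message user field (word 0) */
        unsigned errcode;     /* non-zero for rejected/returned messages */
        unsigned frag_imp;    /* importance bits kept in word 5 for fragments */
    };

    static int is_data(const struct toy_msg *m)
    {
        return m->user <= 3;              /* low..critical importance users */
    }

    static unsigned importance(const struct toy_msg *m)
    {
        if (m->user == FRAGMENTER_USER)
            return m->frag_imp;           /* stored alongside fragment info */
        if (is_data(m) && !m->errcode)
            return m->user;               /* data: importance equals user field */
        return SYSTEM_IMPORTANCE;         /* everything else is system level */
    }

    int main(void)
    {
        struct toy_msg data = { .user = 2 };                          /* high-importance data */
        struct toy_msg frag = { .user = FRAGMENTER_USER, .frag_imp = 3 };
        struct toy_msg prot = { .user = 7 };                          /* link protocol */

        printf("%u %u %u\n", importance(&data), importance(&frag), importance(&prot));
        return 0;
    }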
| 339 | static inline u32 msg_prevnode(struct tipc_msg *m) | 372 | static inline u32 msg_prevnode(struct tipc_msg *m) | 
| 340 | { | 373 | { | 
| 341 | return msg_word(m, 3); | 374 | return msg_word(m, 3); | 
| @@ -348,6 +381,8 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) | |||
| 348 | 381 | ||
| 349 | static inline u32 msg_origport(struct tipc_msg *m) | 382 | static inline u32 msg_origport(struct tipc_msg *m) | 
| 350 | { | 383 | { | 
| 384 | if (msg_user(m) == MSG_FRAGMENTER) | ||
| 385 | m = msg_get_wrapped(m); | ||
| 351 | return msg_word(m, 4); | 386 | return msg_word(m, 4); | 
| 352 | } | 387 | } | 
| 353 | 388 | ||
| @@ -443,35 +478,11 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) | |||
| 443 | msg_set_word(m, 10, n); | 478 | msg_set_word(m, 10, n); | 
| 444 | } | 479 | } | 
| 445 | 480 | ||
| 446 | static inline unchar *msg_data(struct tipc_msg *m) | ||
| 447 | { | ||
| 448 | return ((unchar *)m) + msg_hdr_sz(m); | ||
| 449 | } | ||
| 450 | |||
| 451 | static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | ||
| 452 | { | ||
| 453 | return (struct tipc_msg *)msg_data(m); | ||
| 454 | } | ||
| 455 | |||
| 456 | /* | 481 | /* | 
| 457 | * Constants and routines used to read and write TIPC internal message headers | 482 | * Constants and routines used to read and write TIPC internal message headers | 
| 458 | */ | 483 | */ | 
| 459 | 484 | ||
| 460 | /* | 485 | /* | 
| 461 | * Internal message users | ||
| 462 | */ | ||
| 463 | #define BCAST_PROTOCOL 5 | ||
| 464 | #define MSG_BUNDLER 6 | ||
| 465 | #define LINK_PROTOCOL 7 | ||
| 466 | #define CONN_MANAGER 8 | ||
| 467 | #define ROUTE_DISTRIBUTOR 9 /* obsoleted */ | ||
| 468 | #define CHANGEOVER_PROTOCOL 10 | ||
| 469 | #define NAME_DISTRIBUTOR 11 | ||
| 470 | #define MSG_FRAGMENTER 12 | ||
| 471 | #define LINK_CONFIG 13 | ||
| 472 | #define SOCK_WAKEUP 14 /* pseudo user */ | ||
| 473 | |||
| 474 | /* | ||
| 475 | * Connection management protocol message types | 486 | * Connection management protocol message types | 
| 476 | */ | 487 | */ | 
| 477 | #define CONN_PROBE 0 | 488 | #define CONN_PROBE 0 | 
| @@ -501,8 +512,8 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
| 501 | /* | 512 | /* | 
| 502 | * Changeover tunnel message types | 513 | * Changeover tunnel message types | 
| 503 | */ | 514 | */ | 
| 504 | #define DUPLICATE_MSG 0 | 515 | #define SYNCH_MSG 0 | 
| 505 | #define ORIGINAL_MSG 1 | 516 | #define FAILOVER_MSG 1 | 
| 506 | 517 | ||
| 507 | /* | 518 | /* | 
| 508 | * Config protocol message types | 519 | * Config protocol message types | 
| @@ -510,7 +521,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
| 510 | #define DSC_REQ_MSG 0 | 521 | #define DSC_REQ_MSG 0 | 
| 511 | #define DSC_RESP_MSG 1 | 522 | #define DSC_RESP_MSG 1 | 
| 512 | 523 | ||
| 513 | |||
| 514 | /* | 524 | /* | 
| 515 | * Word 1 | 525 | * Word 1 | 
| 516 | */ | 526 | */ | 
| @@ -534,6 +544,24 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) | |||
| 534 | msg_set_bits(m, 1, 0, 0xffff, n); | 544 | msg_set_bits(m, 1, 0, 0xffff, n); | 
| 535 | } | 545 | } | 
| 536 | 546 | ||
| 547 | static inline u32 msg_node_capabilities(struct tipc_msg *m) | ||
| 548 | { | ||
| 549 | return msg_bits(m, 1, 15, 0x1fff); | ||
| 550 | } | ||
| 551 | |||
| 552 | static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n) | ||
| 553 | { | ||
| 554 | msg_set_bits(m, 1, 15, 0x1fff, n); | ||
| 555 | } | ||
| 556 | |||
| 557 | static inline bool msg_dup(struct tipc_msg *m) | ||
| 558 | { | ||
| 559 | if (likely(msg_user(m) != TUNNEL_PROTOCOL)) | ||
| 560 | return false; | ||
| 561 | if (msg_type(m) != SYNCH_MSG) | ||
| 562 | return false; | ||
| 563 | return true; | ||
| 564 | } | ||
| 537 | 565 | ||
| 538 | /* | 566 | /* | 
| 539 | * Word 2 | 567 | * Word 2 | 
| @@ -688,7 +716,7 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r) | |||
| 688 | 716 | ||
| 689 | static inline char *msg_media_addr(struct tipc_msg *m) | 717 | static inline char *msg_media_addr(struct tipc_msg *m) | 
| 690 | { | 718 | { | 
| 691 | return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET]; | 719 | return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; | 
| 692 | } | 720 | } | 
| 693 | 721 | ||
| 694 | /* | 722 | /* | 
| @@ -734,21 +762,8 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n) | |||
| 734 | msg_set_bits(m, 9, 0, 0xffff, n); | 762 | msg_set_bits(m, 9, 0, 0xffff, n); | 
| 735 | } | 763 | } | 
| 736 | 764 | ||
| 737 | static inline u32 tipc_msg_tot_importance(struct tipc_msg *m) | ||
| 738 | { | ||
| 739 | if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) | ||
| 740 | return msg_importance(msg_get_wrapped(m)); | ||
| 741 | return msg_importance(m); | ||
| 742 | } | ||
| 743 | |||
| 744 | static inline u32 msg_tot_origport(struct tipc_msg *m) | ||
| 745 | { | ||
| 746 | if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) | ||
| 747 | return msg_origport(msg_get_wrapped(m)); | ||
| 748 | return msg_origport(m); | ||
| 749 | } | ||
| 750 | |||
| 751 | struct sk_buff *tipc_buf_acquire(u32 size); | 765 | struct sk_buff *tipc_buf_acquire(u32 size); | 
| 766 | bool tipc_msg_validate(struct sk_buff *skb); | ||
| 752 | bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, | 767 | bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, | 
| 753 | int err); | 768 | int err); | 
| 754 | void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, | 769 | void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, | 
| @@ -757,9 +772,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, | |||
| 757 | uint data_sz, u32 dnode, u32 onode, | 772 | uint data_sz, u32 dnode, u32 onode, | 
| 758 | u32 dport, u32 oport, int errcode); | 773 | u32 dport, u32 oport, int errcode); | 
| 759 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); | 774 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); | 
| 760 | bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); | 775 | bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu); | 
| 761 | bool tipc_msg_make_bundle(struct sk_buff_head *list, | 776 | |
| 762 | struct sk_buff *skb, u32 mtu, u32 dnode); | 777 | bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode); | 
| 763 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); | 778 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); | 
| 764 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, | 779 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, | 
| 765 | int offset, int dsz, int mtu, struct sk_buff_head *list); | 780 | int offset, int dsz, int mtu, struct sk_buff_head *list); | 
| diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index fcb07915aaac..41e7b7e4dda0 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
| @@ -98,7 +98,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb) | |||
| 98 | continue; | 98 | continue; | 
| 99 | if (!tipc_node_active_links(node)) | 99 | if (!tipc_node_active_links(node)) | 
| 100 | continue; | 100 | continue; | 
| 101 | oskb = skb_copy(skb, GFP_ATOMIC); | 101 | oskb = pskb_copy(skb, GFP_ATOMIC); | 
| 102 | if (!oskb) | 102 | if (!oskb) | 
| 103 | break; | 103 | break; | 
| 104 | msg_set_destnode(buf_msg(oskb), dnode); | 104 | msg_set_destnode(buf_msg(oskb), dnode); | 
| @@ -244,6 +244,7 @@ static void tipc_publ_subscribe(struct net *net, struct publication *publ, | |||
| 244 | tipc_node_lock(node); | 244 | tipc_node_lock(node); | 
| 245 | list_add_tail(&publ->nodesub_list, &node->publ_list); | 245 | list_add_tail(&publ->nodesub_list, &node->publ_list); | 
| 246 | tipc_node_unlock(node); | 246 | tipc_node_unlock(node); | 
| 247 | tipc_node_put(node); | ||
| 247 | } | 248 | } | 
| 248 | 249 | ||
| 249 | static void tipc_publ_unsubscribe(struct net *net, struct publication *publ, | 250 | static void tipc_publ_unsubscribe(struct net *net, struct publication *publ, | 
| @@ -258,6 +259,7 @@ static void tipc_publ_unsubscribe(struct net *net, struct publication *publ, | |||
| 258 | tipc_node_lock(node); | 259 | tipc_node_lock(node); | 
| 259 | list_del_init(&publ->nodesub_list); | 260 | list_del_init(&publ->nodesub_list); | 
| 260 | tipc_node_unlock(node); | 261 | tipc_node_unlock(node); | 
| 262 | tipc_node_put(node); | ||
| 261 | } | 263 | } | 
| 262 | 264 | ||
| 263 | /** | 265 | /** | 
| diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 105ba7adf06f..ab0ac62a1287 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -811,8 +811,8 @@ static void tipc_purge_publications(struct net *net, struct name_seq *seq) | |||
| 811 | sseq = seq->sseqs; | 811 | sseq = seq->sseqs; | 
| 812 | info = sseq->info; | 812 | info = sseq->info; | 
| 813 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { | 813 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { | 
| 814 | tipc_nametbl_remove_publ(net, publ->type, publ->lower, | 814 | tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node, | 
| 815 | publ->node, publ->ref, publ->key); | 815 | publ->ref, publ->key); | 
| 816 | kfree_rcu(publ, rcu); | 816 | kfree_rcu(publ, rcu); | 
| 817 | } | 817 | } | 
| 818 | hlist_del_init_rcu(&seq->ns_list); | 818 | hlist_del_init_rcu(&seq->ns_list); | 
| diff --git a/net/tipc/node.c b/net/tipc/node.c index 86152de8248d..22c059ad2999 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | 42 | ||
| 43 | static void node_lost_contact(struct tipc_node *n_ptr); | 43 | static void node_lost_contact(struct tipc_node *n_ptr); | 
| 44 | static void node_established_contact(struct tipc_node *n_ptr); | 44 | static void node_established_contact(struct tipc_node *n_ptr); | 
| 45 | static void tipc_node_delete(struct tipc_node *node); | ||
| 45 | 46 | ||
| 46 | struct tipc_sock_conn { | 47 | struct tipc_sock_conn { | 
| 47 | u32 port; | 48 | u32 port; | 
| @@ -67,6 +68,23 @@ static unsigned int tipc_hashfn(u32 addr) | |||
| 67 | return addr & (NODE_HTABLE_SIZE - 1); | 68 | return addr & (NODE_HTABLE_SIZE - 1); | 
| 68 | } | 69 | } | 
| 69 | 70 | ||
| 71 | static void tipc_node_kref_release(struct kref *kref) | ||
| 72 | { | ||
| 73 | struct tipc_node *node = container_of(kref, struct tipc_node, kref); | ||
| 74 | |||
| 75 | tipc_node_delete(node); | ||
| 76 | } | ||
| 77 | |||
| 78 | void tipc_node_put(struct tipc_node *node) | ||
| 79 | { | ||
| 80 | kref_put(&node->kref, tipc_node_kref_release); | ||
| 81 | } | ||
| 82 | |||
| 83 | static void tipc_node_get(struct tipc_node *node) | ||
| 84 | { | ||
| 85 | kref_get(&node->kref); | ||
| 86 | } | ||
| 87 | |||
| 70 | /* | 88 | /* | 
| 71 | * tipc_node_find - locate specified node object, if it exists | 89 | * tipc_node_find - locate specified node object, if it exists | 
| 72 | */ | 90 | */ | 
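The kref added to struct tipc_node means tipc_node_find() now hands back a counted reference that every caller must drop with tipc_node_put(); the final put invokes tipc_node_kref_release() and frees the node, which is why the hunks below add put calls after each lookup. A userspace sketch of that lifetime rule with a plain counter standing in for struct kref:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_node {
        int refcount;
        unsigned addr;
    };

    static struct toy_node *node_create(unsigned addr)
    {
        struct toy_node *n = calloc(1, sizeof(*n));

        n->addr = addr;
        n->refcount = 1;              /* reference held by the node list */
        return n;
    }

    static void node_get(struct toy_node *n)
    {
        n->refcount++;
    }

    static void node_put(struct toy_node *n)
    {
        if (--n->refcount == 0) {     /* mirrors tipc_node_kref_release() */
            printf("node %#x freed\n", n->addr);
            free(n);
        }
    }

    /* Lookup takes a reference on behalf of the caller, like tipc_node_find(). */
    static struct toy_node *node_find(struct toy_node *n, unsigned addr)
    {
        if (n && n->addr == addr) {
            node_get(n);
            return n;
        }
        return NULL;
    }

    int main(void)
    {
        struct toy_node *list_ref = node_create(0x1001001);
        struct toy_node *n = node_find(list_ref, 0x1001001);

        if (n) {
            /* ... use the node under its lock ... */
            node_put(n);              /* caller drops its lookup reference */
        }
        node_put(list_ref);           /* list teardown drops the last reference */
        return 0;
    }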
| @@ -82,6 +100,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr) | |||
| 82 | hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], | 100 | hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], | 
| 83 | hash) { | 101 | hash) { | 
| 84 | if (node->addr == addr) { | 102 | if (node->addr == addr) { | 
| 103 | tipc_node_get(node); | ||
| 85 | rcu_read_unlock(); | 104 | rcu_read_unlock(); | 
| 86 | return node; | 105 | return node; | 
| 87 | } | 106 | } | 
| @@ -106,12 +125,13 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr) | |||
| 106 | } | 125 | } | 
| 107 | n_ptr->addr = addr; | 126 | n_ptr->addr = addr; | 
| 108 | n_ptr->net = net; | 127 | n_ptr->net = net; | 
| 128 | kref_init(&n_ptr->kref); | ||
| 109 | spin_lock_init(&n_ptr->lock); | 129 | spin_lock_init(&n_ptr->lock); | 
| 110 | INIT_HLIST_NODE(&n_ptr->hash); | 130 | INIT_HLIST_NODE(&n_ptr->hash); | 
| 111 | INIT_LIST_HEAD(&n_ptr->list); | 131 | INIT_LIST_HEAD(&n_ptr->list); | 
| 112 | INIT_LIST_HEAD(&n_ptr->publ_list); | 132 | INIT_LIST_HEAD(&n_ptr->publ_list); | 
| 113 | INIT_LIST_HEAD(&n_ptr->conn_sks); | 133 | INIT_LIST_HEAD(&n_ptr->conn_sks); | 
| 114 | __skb_queue_head_init(&n_ptr->bclink.deferred_queue); | 134 | __skb_queue_head_init(&n_ptr->bclink.deferdq); | 
| 115 | hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); | 135 | hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); | 
| 116 | list_for_each_entry_rcu(temp_node, &tn->node_list, list) { | 136 | list_for_each_entry_rcu(temp_node, &tn->node_list, list) { | 
| 117 | if (n_ptr->addr < temp_node->addr) | 137 | if (n_ptr->addr < temp_node->addr) | 
| @@ -120,16 +140,17 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr) | |||
| 120 | list_add_tail_rcu(&n_ptr->list, &temp_node->list); | 140 | list_add_tail_rcu(&n_ptr->list, &temp_node->list); | 
| 121 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; | 141 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; | 
| 122 | n_ptr->signature = INVALID_NODE_SIG; | 142 | n_ptr->signature = INVALID_NODE_SIG; | 
| 143 | tipc_node_get(n_ptr); | ||
| 123 | exit: | 144 | exit: | 
| 124 | spin_unlock_bh(&tn->node_list_lock); | 145 | spin_unlock_bh(&tn->node_list_lock); | 
| 125 | return n_ptr; | 146 | return n_ptr; | 
| 126 | } | 147 | } | 
| 127 | 148 | ||
| 128 | static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr) | 149 | static void tipc_node_delete(struct tipc_node *node) | 
| 129 | { | 150 | { | 
| 130 | list_del_rcu(&n_ptr->list); | 151 | list_del_rcu(&node->list); | 
| 131 | hlist_del_rcu(&n_ptr->hash); | 152 | hlist_del_rcu(&node->hash); | 
| 132 | kfree_rcu(n_ptr, rcu); | 153 | kfree_rcu(node, rcu); | 
| 133 | } | 154 | } | 
| 134 | 155 | ||
| 135 | void tipc_node_stop(struct net *net) | 156 | void tipc_node_stop(struct net *net) | 
| @@ -139,7 +160,7 @@ void tipc_node_stop(struct net *net) | |||
| 139 | 160 | ||
| 140 | spin_lock_bh(&tn->node_list_lock); | 161 | spin_lock_bh(&tn->node_list_lock); | 
| 141 | list_for_each_entry_safe(node, t_node, &tn->node_list, list) | 162 | list_for_each_entry_safe(node, t_node, &tn->node_list, list) | 
| 142 | tipc_node_delete(tn, node); | 163 | tipc_node_put(node); | 
| 143 | spin_unlock_bh(&tn->node_list_lock); | 164 | spin_unlock_bh(&tn->node_list_lock); | 
| 144 | } | 165 | } | 
| 145 | 166 | ||
| @@ -147,6 +168,7 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) | |||
| 147 | { | 168 | { | 
| 148 | struct tipc_node *node; | 169 | struct tipc_node *node; | 
| 149 | struct tipc_sock_conn *conn; | 170 | struct tipc_sock_conn *conn; | 
| 171 | int err = 0; | ||
| 150 | 172 | ||
| 151 | if (in_own_node(net, dnode)) | 173 | if (in_own_node(net, dnode)) | 
| 152 | return 0; | 174 | return 0; | 
| @@ -157,8 +179,10 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) | |||
| 157 | return -EHOSTUNREACH; | 179 | return -EHOSTUNREACH; | 
| 158 | } | 180 | } | 
| 159 | conn = kmalloc(sizeof(*conn), GFP_ATOMIC); | 181 | conn = kmalloc(sizeof(*conn), GFP_ATOMIC); | 
| 160 | if (!conn) | 182 | if (!conn) { | 
| 161 | return -EHOSTUNREACH; | 183 | err = -EHOSTUNREACH; | 
| 184 | goto exit; | ||
| 185 | } | ||
| 162 | conn->peer_node = dnode; | 186 | conn->peer_node = dnode; | 
| 163 | conn->port = port; | 187 | conn->port = port; | 
| 164 | conn->peer_port = peer_port; | 188 | conn->peer_port = peer_port; | 
| @@ -166,7 +190,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) | |||
| 166 | tipc_node_lock(node); | 190 | tipc_node_lock(node); | 
| 167 | list_add_tail(&conn->list, &node->conn_sks); | 191 | list_add_tail(&conn->list, &node->conn_sks); | 
| 168 | tipc_node_unlock(node); | 192 | tipc_node_unlock(node); | 
| 169 | return 0; | 193 | exit: | 
| 194 | tipc_node_put(node); | ||
| 195 | return err; | ||
| 170 | } | 196 | } | 
| 171 | 197 | ||
| 172 | void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) | 198 | void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) | 
| @@ -189,6 +215,7 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) | |||
| 189 | kfree(conn); | 215 | kfree(conn); | 
| 190 | } | 216 | } | 
| 191 | tipc_node_unlock(node); | 217 | tipc_node_unlock(node); | 
| 218 | tipc_node_put(node); | ||
| 192 | } | 219 | } | 
| 193 | 220 | ||
| 194 | /** | 221 | /** | 
| @@ -227,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
| 227 | active[0] = active[1] = l_ptr; | 254 | active[0] = active[1] = l_ptr; | 
| 228 | exit: | 255 | exit: | 
| 229 | /* Leave room for changeover header when returning 'mtu' to users: */ | 256 | /* Leave room for changeover header when returning 'mtu' to users: */ | 
| 230 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 257 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; | 
| 231 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 258 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; | 
| 232 | } | 259 | } | 
| 233 | 260 | ||
| 234 | /** | 261 | /** | 
| @@ -292,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
| 292 | 319 | ||
| 293 | /* Leave room for changeover header when returning 'mtu' to users: */ | 320 | /* Leave room for changeover header when returning 'mtu' to users: */ | 
| 294 | if (active[0]) { | 321 | if (active[0]) { | 
| 295 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 322 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; | 
| 296 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 323 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; | 
| 297 | return; | 324 | return; | 
| 298 | } | 325 | } | 
| 299 | |||
| 300 | /* Loopback link went down? No fragmentation needed from now on. */ | 326 | /* Loopback link went down? No fragmentation needed from now on. */ | 
| 301 | if (n_ptr->addr == tn->own_addr) { | 327 | if (n_ptr->addr == tn->own_addr) { | 
| 302 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; | 328 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; | 
| @@ -354,7 +380,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
| 354 | 380 | ||
| 355 | /* Flush broadcast link info associated with lost node */ | 381 | /* Flush broadcast link info associated with lost node */ | 
| 356 | if (n_ptr->bclink.recv_permitted) { | 382 | if (n_ptr->bclink.recv_permitted) { | 
| 357 | __skb_queue_purge(&n_ptr->bclink.deferred_queue); | 383 | __skb_queue_purge(&n_ptr->bclink.deferdq); | 
| 358 | 384 | ||
| 359 | if (n_ptr->bclink.reasm_buf) { | 385 | if (n_ptr->bclink.reasm_buf) { | 
| 360 | kfree_skb(n_ptr->bclink.reasm_buf); | 386 | kfree_skb(n_ptr->bclink.reasm_buf); | 
| @@ -367,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
| 367 | n_ptr->bclink.recv_permitted = false; | 393 | n_ptr->bclink.recv_permitted = false; | 
| 368 | } | 394 | } | 
| 369 | 395 | ||
| 370 | /* Abort link changeover */ | 396 | /* Abort any ongoing link failover */ | 
| 371 | for (i = 0; i < MAX_BEARERS; i++) { | 397 | for (i = 0; i < MAX_BEARERS; i++) { | 
| 372 | struct tipc_link *l_ptr = n_ptr->links[i]; | 398 | struct tipc_link *l_ptr = n_ptr->links[i]; | 
| 373 | if (!l_ptr) | 399 | if (!l_ptr) | 
| 374 | continue; | 400 | continue; | 
| 375 | l_ptr->reset_checkpoint = l_ptr->next_in_no; | 401 | l_ptr->flags &= ~LINK_FAILINGOVER; | 
| 376 | l_ptr->exp_msg_count = 0; | 402 | l_ptr->failover_checkpt = 0; | 
| 403 | l_ptr->failover_pkts = 0; | ||
| 404 | kfree_skb(l_ptr->failover_skb); | ||
| 405 | l_ptr->failover_skb = NULL; | ||
| 377 | tipc_link_reset_fragments(l_ptr); | 406 | tipc_link_reset_fragments(l_ptr); | 
| 378 | |||
| 379 | /* Link marked for deletion after failover? => do it now */ | ||
| 380 | if (l_ptr->flags & LINK_STOPPED) | ||
| 381 | tipc_link_delete(l_ptr); | ||
| 382 | } | 407 | } | 
| 383 | 408 | ||
| 384 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; | 409 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; | 
| @@ -417,19 +442,25 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, | |||
| 417 | char *linkname, size_t len) | 442 | char *linkname, size_t len) | 
| 418 | { | 443 | { | 
| 419 | struct tipc_link *link; | 444 | struct tipc_link *link; | 
| 445 | int err = -EINVAL; | ||
| 420 | struct tipc_node *node = tipc_node_find(net, addr); | 446 | struct tipc_node *node = tipc_node_find(net, addr); | 
| 421 | 447 | ||
| 422 | if ((bearer_id >= MAX_BEARERS) || !node) | 448 | if (!node) | 
| 423 | return -EINVAL; | 449 | return err; | 
| 450 | |||
| 451 | if (bearer_id >= MAX_BEARERS) | ||
| 452 | goto exit; | ||
| 453 | |||
| 424 | tipc_node_lock(node); | 454 | tipc_node_lock(node); | 
| 425 | link = node->links[bearer_id]; | 455 | link = node->links[bearer_id]; | 
| 426 | if (link) { | 456 | if (link) { | 
| 427 | strncpy(linkname, link->name, len); | 457 | strncpy(linkname, link->name, len); | 
| 428 | tipc_node_unlock(node); | 458 | err = 0; | 
| 429 | return 0; | ||
| 430 | } | 459 | } | 
| 460 | exit: | ||
| 431 | tipc_node_unlock(node); | 461 | tipc_node_unlock(node); | 
| 432 | return -EINVAL; | 462 | tipc_node_put(node); | 
| 463 | return err; | ||
| 433 | } | 464 | } | 
| 434 | 465 | ||
| 435 | void tipc_node_unlock(struct tipc_node *node) | 466 | void tipc_node_unlock(struct tipc_node *node) | 
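
With the changes above, tipc_node_find() hands back a counted reference that the caller must drop with tipc_node_put() on every exit path, which is why tipc_node_get_linkname() is restructured around a single unlock/put tail. A minimal sketch of the caller contract, assuming the find/put pairing shown in these hunks (the function itself is only illustrative):

    /* Illustrative caller, not part of the patch: every successful
     * tipc_node_find() is paired with exactly one tipc_node_put().
     */
    static int node_inspect(struct net *net, u32 addr)
    {
            struct tipc_node *node = tipc_node_find(net, addr);
            int err = -EINVAL;

            if (!node)
                    return err;
            tipc_node_lock(node);
            /* ... read or update per-node state here ... */
            err = 0;
            tipc_node_unlock(node);
            tipc_node_put(node);
            return err;
    }
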
| @@ -459,7 +490,7 @@ void tipc_node_unlock(struct tipc_node *node) | |||
| 459 | TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | | 490 | TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | | 
| 460 | TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP | | 491 | TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP | | 
| 461 | TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT | | 492 | TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT | | 
| 462 | TIPC_NAMED_MSG_EVT); | 493 | TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET); | 
| 463 | 494 | ||
| 464 | spin_unlock_bh(&node->lock); | 495 | spin_unlock_bh(&node->lock); | 
| 465 | 496 | ||
| @@ -488,6 +519,9 @@ void tipc_node_unlock(struct tipc_node *node) | |||
| 488 | 519 | ||
| 489 | if (flags & TIPC_BCAST_MSG_EVT) | 520 | if (flags & TIPC_BCAST_MSG_EVT) | 
| 490 | tipc_bclink_input(net); | 521 | tipc_bclink_input(net); | 
| 522 | |||
| 523 | if (flags & TIPC_BCAST_RESET) | ||
| 524 | tipc_link_reset_all(node); | ||
| 491 | } | 525 | } | 
| 492 | 526 | ||
| 493 | /* Caller should hold node lock for the passed node */ | 527 | /* Caller should hold node lock for the passed node */ | 
| @@ -542,17 +576,21 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 542 | msg.seq = cb->nlh->nlmsg_seq; | 576 | msg.seq = cb->nlh->nlmsg_seq; | 
| 543 | 577 | ||
| 544 | rcu_read_lock(); | 578 | rcu_read_lock(); | 
| 545 | 579 | if (last_addr) { | |
| 546 | if (last_addr && !tipc_node_find(net, last_addr)) { | 580 | node = tipc_node_find(net, last_addr); | 
| 547 | rcu_read_unlock(); | 581 | if (!node) { | 
| 548 | /* We never set seq or call nl_dump_check_consistent() this | 582 | rcu_read_unlock(); | 
| 549 | * means that setting prev_seq here will cause the consistence | 583 | /* We never set seq or call nl_dump_check_consistent() | 
| 550 | * check to fail in the netlink callback handler. Resulting in | 584 | * this means that setting prev_seq here will cause the | 
| 551 | * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if | 585 | * consistence check to fail in the netlink callback | 
| 552 | * the node state changed while we released the lock. | 586 | * handler. Resulting in the NLMSG_DONE message having | 
| 553 | */ | 587 | * the NLM_F_DUMP_INTR flag set if the node state | 
| 554 | cb->prev_seq = 1; | 588 | * changed while we released the lock. | 
| 555 | return -EPIPE; | 589 | */ | 
| 590 | cb->prev_seq = 1; | ||
| 591 | return -EPIPE; | ||
| 592 | } | ||
| 593 | tipc_node_put(node); | ||
| 556 | } | 594 | } | 
| 557 | 595 | ||
| 558 | list_for_each_entry_rcu(node, &tn->node_list, list) { | 596 | list_for_each_entry_rcu(node, &tn->node_list, list) { | 
| diff --git a/net/tipc/node.h b/net/tipc/node.h index 3d18c66b7f78..02d5c20dc551 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h | |||
| @@ -64,7 +64,8 @@ enum { | |||
| 64 | TIPC_NOTIFY_LINK_UP = (1 << 6), | 64 | TIPC_NOTIFY_LINK_UP = (1 << 6), | 
| 65 | TIPC_NOTIFY_LINK_DOWN = (1 << 7), | 65 | TIPC_NOTIFY_LINK_DOWN = (1 << 7), | 
| 66 | TIPC_NAMED_MSG_EVT = (1 << 8), | 66 | TIPC_NAMED_MSG_EVT = (1 << 8), | 
| 67 | TIPC_BCAST_MSG_EVT = (1 << 9) | 67 | TIPC_BCAST_MSG_EVT = (1 << 9), | 
| 68 | TIPC_BCAST_RESET = (1 << 10) | ||
| 68 | }; | 69 | }; | 
| 69 | 70 | ||
| 70 | /** | 71 | /** | 
| @@ -84,7 +85,7 @@ struct tipc_node_bclink { | |||
| 84 | u32 last_sent; | 85 | u32 last_sent; | 
| 85 | u32 oos_state; | 86 | u32 oos_state; | 
| 86 | u32 deferred_size; | 87 | u32 deferred_size; | 
| 87 | struct sk_buff_head deferred_queue; | 88 | struct sk_buff_head deferdq; | 
| 88 | struct sk_buff *reasm_buf; | 89 | struct sk_buff *reasm_buf; | 
| 89 | int inputq_map; | 90 | int inputq_map; | 
| 90 | bool recv_permitted; | 91 | bool recv_permitted; | 
| @@ -93,6 +94,7 @@ struct tipc_node_bclink { | |||
| 93 | /** | 94 | /** | 
| 94 | * struct tipc_node - TIPC node structure | 95 | * struct tipc_node - TIPC node structure | 
| 95 | * @addr: network address of node | 96 | * @addr: network address of node | 
| 97 | * @ref: reference counter to node object | ||
| 96 | * @lock: spinlock governing access to structure | 98 | * @lock: spinlock governing access to structure | 
| 97 | * @net: the applicable net namespace | 99 | * @net: the applicable net namespace | 
| 98 | * @hash: links to adjacent nodes in unsorted hash chain | 100 | * @hash: links to adjacent nodes in unsorted hash chain | 
| @@ -106,6 +108,7 @@ struct tipc_node_bclink { | |||
| 106 | * @list: links to adjacent nodes in sorted list of cluster's nodes | 108 | * @list: links to adjacent nodes in sorted list of cluster's nodes | 
| 107 | * @working_links: number of working links to node (both active and standby) | 109 | * @working_links: number of working links to node (both active and standby) | 
| 108 | * @link_cnt: number of links to node | 110 | * @link_cnt: number of links to node | 
| 111 | * @capabilities: bitmap, indicating peer node's functional capabilities | ||
| 109 | * @signature: node instance identifier | 112 | * @signature: node instance identifier | 
| 110 | * @link_id: local and remote bearer ids of changing link, if any | 113 | * @link_id: local and remote bearer ids of changing link, if any | 
| 111 | * @publ_list: list of publications | 114 | * @publ_list: list of publications | 
| @@ -113,6 +116,7 @@ struct tipc_node_bclink { | |||
| 113 | */ | 116 | */ | 
| 114 | struct tipc_node { | 117 | struct tipc_node { | 
| 115 | u32 addr; | 118 | u32 addr; | 
| 119 | struct kref kref; | ||
| 116 | spinlock_t lock; | 120 | spinlock_t lock; | 
| 117 | struct net *net; | 121 | struct net *net; | 
| 118 | struct hlist_node hash; | 122 | struct hlist_node hash; | 
| @@ -125,7 +129,8 @@ struct tipc_node { | |||
| 125 | struct tipc_node_bclink bclink; | 129 | struct tipc_node_bclink bclink; | 
| 126 | struct list_head list; | 130 | struct list_head list; | 
| 127 | int link_cnt; | 131 | int link_cnt; | 
| 128 | int working_links; | 132 | u16 working_links; | 
| 133 | u16 capabilities; | ||
| 129 | u32 signature; | 134 | u32 signature; | 
| 130 | u32 link_id; | 135 | u32 link_id; | 
| 131 | struct list_head publ_list; | 136 | struct list_head publ_list; | 
| @@ -134,6 +139,7 @@ struct tipc_node { | |||
| 134 | }; | 139 | }; | 
| 135 | 140 | ||
| 136 | struct tipc_node *tipc_node_find(struct net *net, u32 addr); | 141 | struct tipc_node *tipc_node_find(struct net *net, u32 addr); | 
| 142 | void tipc_node_put(struct tipc_node *node); | ||
| 137 | struct tipc_node *tipc_node_create(struct net *net, u32 addr); | 143 | struct tipc_node *tipc_node_create(struct net *net, u32 addr); | 
| 138 | void tipc_node_stop(struct net *net); | 144 | void tipc_node_stop(struct net *net); | 
| 139 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 145 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 
| @@ -168,10 +174,12 @@ static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector) | |||
| 168 | 174 | ||
| 169 | node = tipc_node_find(net, addr); | 175 | node = tipc_node_find(net, addr); | 
| 170 | 176 | ||
| 171 | if (likely(node)) | 177 | if (likely(node)) { | 
| 172 | mtu = node->act_mtus[selector & 1]; | 178 | mtu = node->act_mtus[selector & 1]; | 
| 173 | else | 179 | tipc_node_put(node); | 
| 180 | } else { | ||
| 174 | mtu = MAX_MSG_SIZE; | 181 | mtu = MAX_MSG_SIZE; | 
| 182 | } | ||
| 175 | 183 | ||
| 176 | return mtu; | 184 | return mtu; | 
| 177 | } | 185 | } | 
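
The node.h hunks add a kref to struct tipc_node and declare tipc_node_put(), but the matching node.c implementation is not part of this section. Below is a sketch of the conventional kref idiom it presumably follows; the release helper name and the plain kfree() are assumptions, not the actual code:

    /* Assumed shape only -- the real release path likely also removes the
     * node from the hash chain and cluster list (possibly via RCU).
     */
    static void tipc_node_kref_release(struct kref *kref)
    {
            struct tipc_node *node = container_of(kref, struct tipc_node, kref);

            kfree(node);
    }

    void tipc_node_put(struct tipc_node *node)
    {
            kref_put(&node->kref, tipc_node_kref_release);
    }
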
| diff --git a/net/tipc/server.c b/net/tipc/server.c index eadd4ed45905..77ff03ed1e18 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
| @@ -37,11 +37,13 @@ | |||
| 37 | #include "core.h" | 37 | #include "core.h" | 
| 38 | #include "socket.h" | 38 | #include "socket.h" | 
| 39 | #include <net/sock.h> | 39 | #include <net/sock.h> | 
| 40 | #include <linux/module.h> | ||
| 40 | 41 | ||
| 41 | /* Number of messages to send before rescheduling */ | 42 | /* Number of messages to send before rescheduling */ | 
| 42 | #define MAX_SEND_MSG_COUNT 25 | 43 | #define MAX_SEND_MSG_COUNT 25 | 
| 43 | #define MAX_RECV_MSG_COUNT 25 | 44 | #define MAX_RECV_MSG_COUNT 25 | 
| 44 | #define CF_CONNECTED 1 | 45 | #define CF_CONNECTED 1 | 
| 46 | #define CF_SERVER 2 | ||
| 45 | 47 | ||
| 46 | #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data) | 48 | #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data) | 
| 47 | 49 | ||
| @@ -88,9 +90,19 @@ static void tipc_clean_outqueues(struct tipc_conn *con); | |||
| 88 | static void tipc_conn_kref_release(struct kref *kref) | 90 | static void tipc_conn_kref_release(struct kref *kref) | 
| 89 | { | 91 | { | 
| 90 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 92 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 
| 93 | struct sockaddr_tipc *saddr = con->server->saddr; | ||
| 94 | struct socket *sock = con->sock; | ||
| 95 | struct sock *sk; | ||
| 91 | 96 | ||
| 92 | if (con->sock) { | 97 | if (sock) { | 
| 93 | tipc_sock_release_local(con->sock); | 98 | sk = sock->sk; | 
| 99 | if (test_bit(CF_SERVER, &con->flags)) { | ||
| 100 | __module_get(sock->ops->owner); | ||
| 101 | __module_get(sk->sk_prot_creator->owner); | ||
| 102 | } | ||
| 103 | saddr->scope = -TIPC_NODE_SCOPE; | ||
| 104 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); | ||
| 105 | sock_release(sock); | ||
| 94 | con->sock = NULL; | 106 | con->sock = NULL; | 
| 95 | } | 107 | } | 
| 96 | 108 | ||
| @@ -281,7 +293,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) | |||
| 281 | struct tipc_conn *newcon; | 293 | struct tipc_conn *newcon; | 
| 282 | int ret; | 294 | int ret; | 
| 283 | 295 | ||
| 284 | ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK); | 296 | ret = kernel_accept(sock, &newsock, O_NONBLOCK); | 
| 285 | if (ret < 0) | 297 | if (ret < 0) | 
| 286 | return ret; | 298 | return ret; | 
| 287 | 299 | ||
| @@ -309,7 +321,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
| 309 | struct socket *sock = NULL; | 321 | struct socket *sock = NULL; | 
| 310 | int ret; | 322 | int ret; | 
| 311 | 323 | ||
| 312 | ret = tipc_sock_create_local(s->net, s->type, &sock); | 324 | ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1); | 
| 313 | if (ret < 0) | 325 | if (ret < 0) | 
| 314 | return NULL; | 326 | return NULL; | 
| 315 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, | 327 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, | 
| @@ -337,11 +349,31 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
| 337 | pr_err("Unknown socket type %d\n", s->type); | 349 | pr_err("Unknown socket type %d\n", s->type); | 
| 338 | goto create_err; | 350 | goto create_err; | 
| 339 | } | 351 | } | 
| 352 | |||
| 353 | /* As server's listening socket owner and creator is the same module, | ||
| 354 | * we have to decrease TIPC module reference count to guarantee that | ||
| 355 | * it remains zero after the server socket is created, otherwise, | ||
| 356 | * executing "rmmod" command is unable to make TIPC module deleted | ||
| 357 | * after TIPC module is inserted successfully. | ||
| 358 | * | ||
| 359 | * However, the reference count is ever increased twice in | ||
| 360 | * sock_create_kern(): one is to increase the reference count of owner | ||
| 361 | * of TIPC socket's proto_ops struct; another is to increment the | ||
| 362 | * reference count of owner of TIPC proto struct. Therefore, we must | ||
| 363 | * decrement the module reference count twice to ensure that it keeps | ||
| 364 | * zero after server's listening socket is created. Of course, we | ||
| 365 | * must bump the module reference count twice as well before the socket | ||
| 366 | * is closed. | ||
| 367 | */ | ||
| 368 | module_put(sock->ops->owner); | ||
| 369 | module_put(sock->sk->sk_prot_creator->owner); | ||
| 370 | set_bit(CF_SERVER, &con->flags); | ||
| 371 | |||
| 340 | return sock; | 372 | return sock; | 
| 341 | 373 | ||
| 342 | create_err: | 374 | create_err: | 
| 375 | kernel_sock_shutdown(sock, SHUT_RDWR); | ||
| 343 | sock_release(sock); | 376 | sock_release(sock); | 
| 344 | con->sock = NULL; | ||
| 345 | return NULL; | 377 | return NULL; | 
| 346 | } | 378 | } | 
| 347 | 379 | ||
| diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f73e975af80b..9074b5cede38 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | */ | 35 | */ | 
| 36 | 36 | ||
| 37 | #include <linux/rhashtable.h> | 37 | #include <linux/rhashtable.h> | 
| 38 | #include <linux/jhash.h> | ||
| 39 | #include "core.h" | 38 | #include "core.h" | 
| 40 | #include "name_table.h" | 39 | #include "name_table.h" | 
| 41 | #include "node.h" | 40 | #include "node.h" | 
| @@ -74,6 +73,7 @@ | |||
| 74 | * @link_cong: non-zero if owner must sleep because of link congestion | 73 | * @link_cong: non-zero if owner must sleep because of link congestion | 
| 75 | * @sent_unacked: # messages sent by socket, and not yet acked by peer | 74 | * @sent_unacked: # messages sent by socket, and not yet acked by peer | 
| 76 | * @rcv_unacked: # messages read by user, but not yet acked back to peer | 75 | * @rcv_unacked: # messages read by user, but not yet acked back to peer | 
| 76 | * @remote: 'connected' peer for dgram/rdm | ||
| 77 | * @node: hash table node | 77 | * @node: hash table node | 
| 78 | * @rcu: rcu struct for tipc_sock | 78 | * @rcu: rcu struct for tipc_sock | 
| 79 | */ | 79 | */ | 
| @@ -96,6 +96,7 @@ struct tipc_sock { | |||
| 96 | bool link_cong; | 96 | bool link_cong; | 
| 97 | uint sent_unacked; | 97 | uint sent_unacked; | 
| 98 | uint rcv_unacked; | 98 | uint rcv_unacked; | 
| 99 | struct sockaddr_tipc remote; | ||
| 99 | struct rhash_head node; | 100 | struct rhash_head node; | 
| 100 | struct rcu_head rcu; | 101 | struct rcu_head rcu; | 
| 101 | }; | 102 | }; | 
| @@ -114,13 +115,14 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | |||
| 114 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); | 115 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); | 
| 115 | static int tipc_sk_insert(struct tipc_sock *tsk); | 116 | static int tipc_sk_insert(struct tipc_sock *tsk); | 
| 116 | static void tipc_sk_remove(struct tipc_sock *tsk); | 117 | static void tipc_sk_remove(struct tipc_sock *tsk); | 
| 118 | static int __tipc_send_stream(struct socket *sock, struct msghdr *m, | ||
| 119 | size_t dsz); | ||
| 120 | static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); | ||
| 117 | 121 | ||
| 118 | static const struct proto_ops packet_ops; | 122 | static const struct proto_ops packet_ops; | 
| 119 | static const struct proto_ops stream_ops; | 123 | static const struct proto_ops stream_ops; | 
| 120 | static const struct proto_ops msg_ops; | 124 | static const struct proto_ops msg_ops; | 
| 121 | |||
| 122 | static struct proto tipc_proto; | 125 | static struct proto tipc_proto; | 
| 123 | static struct proto tipc_proto_kern; | ||
| 124 | 126 | ||
| 125 | static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | 127 | static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | 
| 126 | [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, | 128 | [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, | 
| @@ -130,6 +132,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | |||
| 130 | [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } | 132 | [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } | 
| 131 | }; | 133 | }; | 
| 132 | 134 | ||
| 135 | static const struct rhashtable_params tsk_rht_params; | ||
| 136 | |||
| 133 | /* | 137 | /* | 
| 134 | * Revised TIPC socket locking policy: | 138 | * Revised TIPC socket locking policy: | 
| 135 | * | 139 | * | 
| @@ -338,11 +342,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
| 338 | } | 342 | } | 
| 339 | 343 | ||
| 340 | /* Allocate socket's protocol area */ | 344 | /* Allocate socket's protocol area */ | 
| 341 | if (!kern) | 345 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); | 
| 342 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); | ||
| 343 | else | ||
| 344 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern); | ||
| 345 | |||
| 346 | if (sk == NULL) | 346 | if (sk == NULL) | 
| 347 | return -ENOMEM; | 347 | return -ENOMEM; | 
| 348 | 348 | ||
| @@ -380,75 +380,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
| 380 | return 0; | 380 | return 0; | 
| 381 | } | 381 | } | 
| 382 | 382 | ||
| 383 | /** | ||
| 384 | * tipc_sock_create_local - create TIPC socket from inside TIPC module | ||
| 385 | * @type: socket type - SOCK_RDM or SOCK_SEQPACKET | ||
| 386 | * | ||
| 387 | * We cannot use sock_creat_kern here because it bumps module user count. | ||
| 388 | * Since socket owner and creator is the same module we must make sure | ||
| 389 | * that module count remains zero for module local sockets, otherwise | ||
| 390 | * we cannot do rmmod. | ||
| 391 | * | ||
| 392 | * Returns 0 on success, errno otherwise | ||
| 393 | */ | ||
| 394 | int tipc_sock_create_local(struct net *net, int type, struct socket **res) | ||
| 395 | { | ||
| 396 | int rc; | ||
| 397 | |||
| 398 | rc = sock_create_lite(AF_TIPC, type, 0, res); | ||
| 399 | if (rc < 0) { | ||
| 400 | pr_err("Failed to create kernel socket\n"); | ||
| 401 | return rc; | ||
| 402 | } | ||
| 403 | tipc_sk_create(net, *res, 0, 1); | ||
| 404 | |||
| 405 | return 0; | ||
| 406 | } | ||
| 407 | |||
| 408 | /** | ||
| 409 | * tipc_sock_release_local - release socket created by tipc_sock_create_local | ||
| 410 | * @sock: the socket to be released. | ||
| 411 | * | ||
| 412 | * Module reference count is not incremented when such sockets are created, | ||
| 413 | * so we must keep it from being decremented when they are released. | ||
| 414 | */ | ||
| 415 | void tipc_sock_release_local(struct socket *sock) | ||
| 416 | { | ||
| 417 | tipc_release(sock); | ||
| 418 | sock->ops = NULL; | ||
| 419 | sock_release(sock); | ||
| 420 | } | ||
| 421 | |||
| 422 | /** | ||
| 423 | * tipc_sock_accept_local - accept a connection on a socket created | ||
| 424 | * with tipc_sock_create_local. Use this function to avoid that | ||
| 425 | * module reference count is inadvertently incremented. | ||
| 426 | * | ||
| 427 | * @sock: the accepting socket | ||
| 428 | * @newsock: reference to the new socket to be created | ||
| 429 | * @flags: socket flags | ||
| 430 | */ | ||
| 431 | |||
| 432 | int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, | ||
| 433 | int flags) | ||
| 434 | { | ||
| 435 | struct sock *sk = sock->sk; | ||
| 436 | int ret; | ||
| 437 | |||
| 438 | ret = sock_create_lite(sk->sk_family, sk->sk_type, | ||
| 439 | sk->sk_protocol, newsock); | ||
| 440 | if (ret < 0) | ||
| 441 | return ret; | ||
| 442 | |||
| 443 | ret = tipc_accept(sock, *newsock, flags); | ||
| 444 | if (ret < 0) { | ||
| 445 | sock_release(*newsock); | ||
| 446 | return ret; | ||
| 447 | } | ||
| 448 | (*newsock)->ops = sock->ops; | ||
| 449 | return ret; | ||
| 450 | } | ||
| 451 | |||
| 452 | static void tipc_sk_callback(struct rcu_head *head) | 383 | static void tipc_sk_callback(struct rcu_head *head) | 
| 453 | { | 384 | { | 
| 454 | struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); | 385 | struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); | 
| @@ -892,7 +823,6 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) | |||
| 892 | 823 | ||
| 893 | /** | 824 | /** | 
| 894 | * tipc_sendmsg - send message in connectionless manner | 825 | * tipc_sendmsg - send message in connectionless manner | 
| 895 | * @iocb: if NULL, indicates that socket lock is already held | ||
| 896 | * @sock: socket structure | 826 | * @sock: socket structure | 
| 897 | * @m: message to send | 827 | * @m: message to send | 
| 898 | * @dsz: amount of user data to be sent | 828 | * @dsz: amount of user data to be sent | 
| @@ -904,9 +834,21 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) | |||
| 904 | * | 834 | * | 
| 905 | * Returns the number of bytes sent on success, or errno otherwise | 835 | * Returns the number of bytes sent on success, or errno otherwise | 
| 906 | */ | 836 | */ | 
| 907 | static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | 837 | static int tipc_sendmsg(struct socket *sock, | 
| 908 | struct msghdr *m, size_t dsz) | 838 | struct msghdr *m, size_t dsz) | 
| 909 | { | 839 | { | 
| 840 | struct sock *sk = sock->sk; | ||
| 841 | int ret; | ||
| 842 | |||
| 843 | lock_sock(sk); | ||
| 844 | ret = __tipc_sendmsg(sock, m, dsz); | ||
| 845 | release_sock(sk); | ||
| 846 | |||
| 847 | return ret; | ||
| 848 | } | ||
| 849 | |||
| 850 | static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) | ||
| 851 | { | ||
| 910 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); | 852 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); | 
| 911 | struct sock *sk = sock->sk; | 853 | struct sock *sk = sock->sk; | 
| 912 | struct tipc_sock *tsk = tipc_sk(sk); | 854 | struct tipc_sock *tsk = tipc_sk(sk); | 
| @@ -915,49 +857,40 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 915 | u32 dnode, dport; | 857 | u32 dnode, dport; | 
| 916 | struct sk_buff_head *pktchain = &sk->sk_write_queue; | 858 | struct sk_buff_head *pktchain = &sk->sk_write_queue; | 
| 917 | struct sk_buff *skb; | 859 | struct sk_buff *skb; | 
| 918 | struct tipc_name_seq *seq = &dest->addr.nameseq; | 860 | struct tipc_name_seq *seq; | 
| 919 | struct iov_iter save; | 861 | struct iov_iter save; | 
| 920 | u32 mtu; | 862 | u32 mtu; | 
| 921 | long timeo; | 863 | long timeo; | 
| 922 | int rc; | 864 | int rc; | 
| 923 | 865 | ||
| 924 | if (unlikely(!dest)) | ||
| 925 | return -EDESTADDRREQ; | ||
| 926 | |||
| 927 | if (unlikely((m->msg_namelen < sizeof(*dest)) || | ||
| 928 | (dest->family != AF_TIPC))) | ||
| 929 | return -EINVAL; | ||
| 930 | |||
| 931 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 866 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 
| 932 | return -EMSGSIZE; | 867 | return -EMSGSIZE; | 
| 933 | 868 | if (unlikely(!dest)) { | |
| 934 | if (iocb) | 869 | if (tsk->connected && sock->state == SS_READY) | 
| 935 | lock_sock(sk); | 870 | dest = &tsk->remote; | 
| 936 | 871 | else | |
| 872 | return -EDESTADDRREQ; | ||
| 873 | } else if (unlikely(m->msg_namelen < sizeof(*dest)) || | ||
| 874 | dest->family != AF_TIPC) { | ||
| 875 | return -EINVAL; | ||
| 876 | } | ||
| 937 | if (unlikely(sock->state != SS_READY)) { | 877 | if (unlikely(sock->state != SS_READY)) { | 
| 938 | if (sock->state == SS_LISTENING) { | 878 | if (sock->state == SS_LISTENING) | 
| 939 | rc = -EPIPE; | 879 | return -EPIPE; | 
| 940 | goto exit; | 880 | if (sock->state != SS_UNCONNECTED) | 
| 941 | } | 881 | return -EISCONN; | 
| 942 | if (sock->state != SS_UNCONNECTED) { | 882 | if (tsk->published) | 
| 943 | rc = -EISCONN; | 883 | return -EOPNOTSUPP; | 
| 944 | goto exit; | ||
| 945 | } | ||
| 946 | if (tsk->published) { | ||
| 947 | rc = -EOPNOTSUPP; | ||
| 948 | goto exit; | ||
| 949 | } | ||
| 950 | if (dest->addrtype == TIPC_ADDR_NAME) { | 884 | if (dest->addrtype == TIPC_ADDR_NAME) { | 
| 951 | tsk->conn_type = dest->addr.name.name.type; | 885 | tsk->conn_type = dest->addr.name.name.type; | 
| 952 | tsk->conn_instance = dest->addr.name.name.instance; | 886 | tsk->conn_instance = dest->addr.name.name.instance; | 
| 953 | } | 887 | } | 
| 954 | } | 888 | } | 
| 955 | 889 | seq = &dest->addr.nameseq; | |
| 956 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 890 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 
| 957 | 891 | ||
| 958 | if (dest->addrtype == TIPC_ADDR_MCAST) { | 892 | if (dest->addrtype == TIPC_ADDR_MCAST) { | 
| 959 | rc = tipc_sendmcast(sock, seq, m, dsz, timeo); | 893 | return tipc_sendmcast(sock, seq, m, dsz, timeo); | 
| 960 | goto exit; | ||
| 961 | } else if (dest->addrtype == TIPC_ADDR_NAME) { | 894 | } else if (dest->addrtype == TIPC_ADDR_NAME) { | 
| 962 | u32 type = dest->addr.name.name.type; | 895 | u32 type = dest->addr.name.name.type; | 
| 963 | u32 inst = dest->addr.name.name.instance; | 896 | u32 inst = dest->addr.name.name.instance; | 
| @@ -972,10 +905,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 972 | dport = tipc_nametbl_translate(net, type, inst, &dnode); | 905 | dport = tipc_nametbl_translate(net, type, inst, &dnode); | 
| 973 | msg_set_destnode(mhdr, dnode); | 906 | msg_set_destnode(mhdr, dnode); | 
| 974 | msg_set_destport(mhdr, dport); | 907 | msg_set_destport(mhdr, dport); | 
| 975 | if (unlikely(!dport && !dnode)) { | 908 | if (unlikely(!dport && !dnode)) | 
| 976 | rc = -EHOSTUNREACH; | 909 | return -EHOSTUNREACH; | 
| 977 | goto exit; | ||
| 978 | } | ||
| 979 | } else if (dest->addrtype == TIPC_ADDR_ID) { | 910 | } else if (dest->addrtype == TIPC_ADDR_ID) { | 
| 980 | dnode = dest->addr.id.node; | 911 | dnode = dest->addr.id.node; | 
| 981 | msg_set_type(mhdr, TIPC_DIRECT_MSG); | 912 | msg_set_type(mhdr, TIPC_DIRECT_MSG); | 
| @@ -990,7 +921,7 @@ new_mtu: | |||
| 990 | mtu = tipc_node_get_mtu(net, dnode, tsk->portid); | 921 | mtu = tipc_node_get_mtu(net, dnode, tsk->portid); | 
| 991 | rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); | 922 | rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); | 
| 992 | if (rc < 0) | 923 | if (rc < 0) | 
| 993 | goto exit; | 924 | return rc; | 
| 994 | 925 | ||
| 995 | do { | 926 | do { | 
| 996 | skb = skb_peek(pktchain); | 927 | skb = skb_peek(pktchain); | 
| @@ -1013,9 +944,6 @@ new_mtu: | |||
| 1013 | if (rc) | 944 | if (rc) | 
| 1014 | __skb_queue_purge(pktchain); | 945 | __skb_queue_purge(pktchain); | 
| 1015 | } while (!rc); | 946 | } while (!rc); | 
| 1016 | exit: | ||
| 1017 | if (iocb) | ||
| 1018 | release_sock(sk); | ||
| 1019 | 947 | ||
| 1020 | return rc; | 948 | return rc; | 
| 1021 | } | 949 | } | 
| @@ -1052,7 +980,6 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) | |||
| 1052 | 980 | ||
| 1053 | /** | 981 | /** | 
| 1054 | * tipc_send_stream - send stream-oriented data | 982 | * tipc_send_stream - send stream-oriented data | 
| 1055 | * @iocb: (unused) | ||
| 1056 | * @sock: socket structure | 983 | * @sock: socket structure | 
| 1057 | * @m: data to send | 984 | * @m: data to send | 
| 1058 | * @dsz: total length of data to be transmitted | 985 | * @dsz: total length of data to be transmitted | 
| @@ -1062,8 +989,19 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) | |||
| 1062 | * Returns the number of bytes sent on success (or partial success), | 989 | * Returns the number of bytes sent on success (or partial success), | 
| 1063 | * or errno if no data sent | 990 | * or errno if no data sent | 
| 1064 | */ | 991 | */ | 
| 1065 | static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, | 992 | static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) | 
| 1066 | struct msghdr *m, size_t dsz) | 993 | { | 
| 994 | struct sock *sk = sock->sk; | ||
| 995 | int ret; | ||
| 996 | |||
| 997 | lock_sock(sk); | ||
| 998 | ret = __tipc_send_stream(sock, m, dsz); | ||
| 999 | release_sock(sk); | ||
| 1000 | |||
| 1001 | return ret; | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) | ||
| 1067 | { | 1005 | { | 
| 1068 | struct sock *sk = sock->sk; | 1006 | struct sock *sk = sock->sk; | 
| 1069 | struct net *net = sock_net(sk); | 1007 | struct net *net = sock_net(sk); | 
| @@ -1080,7 +1018,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, | |||
| 1080 | 1018 | ||
| 1081 | /* Handle implied connection establishment */ | 1019 | /* Handle implied connection establishment */ | 
| 1082 | if (unlikely(dest)) { | 1020 | if (unlikely(dest)) { | 
| 1083 | rc = tipc_sendmsg(iocb, sock, m, dsz); | 1021 | rc = __tipc_sendmsg(sock, m, dsz); | 
| 1084 | if (dsz && (dsz == rc)) | 1022 | if (dsz && (dsz == rc)) | 
| 1085 | tsk->sent_unacked = 1; | 1023 | tsk->sent_unacked = 1; | 
| 1086 | return rc; | 1024 | return rc; | 
| @@ -1088,15 +1026,11 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, | |||
| 1088 | if (dsz > (uint)INT_MAX) | 1026 | if (dsz > (uint)INT_MAX) | 
| 1089 | return -EMSGSIZE; | 1027 | return -EMSGSIZE; | 
| 1090 | 1028 | ||
| 1091 | if (iocb) | ||
| 1092 | lock_sock(sk); | ||
| 1093 | |||
| 1094 | if (unlikely(sock->state != SS_CONNECTED)) { | 1029 | if (unlikely(sock->state != SS_CONNECTED)) { | 
| 1095 | if (sock->state == SS_DISCONNECTING) | 1030 | if (sock->state == SS_DISCONNECTING) | 
| 1096 | rc = -EPIPE; | 1031 | return -EPIPE; | 
| 1097 | else | 1032 | else | 
| 1098 | rc = -ENOTCONN; | 1033 | return -ENOTCONN; | 
| 1099 | goto exit; | ||
| 1100 | } | 1034 | } | 
| 1101 | 1035 | ||
| 1102 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 1036 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 
| @@ -1108,7 +1042,7 @@ next: | |||
| 1108 | send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); | 1042 | send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); | 
| 1109 | rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); | 1043 | rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); | 
| 1110 | if (unlikely(rc < 0)) | 1044 | if (unlikely(rc < 0)) | 
| 1111 | goto exit; | 1045 | return rc; | 
| 1112 | do { | 1046 | do { | 
| 1113 | if (likely(!tsk_conn_cong(tsk))) { | 1047 | if (likely(!tsk_conn_cong(tsk))) { | 
| 1114 | rc = tipc_link_xmit(net, pktchain, dnode, portid); | 1048 | rc = tipc_link_xmit(net, pktchain, dnode, portid); | 
| @@ -1133,15 +1067,12 @@ next: | |||
| 1133 | if (rc) | 1067 | if (rc) | 
| 1134 | __skb_queue_purge(pktchain); | 1068 | __skb_queue_purge(pktchain); | 
| 1135 | } while (!rc); | 1069 | } while (!rc); | 
| 1136 | exit: | 1070 | |
| 1137 | if (iocb) | ||
| 1138 | release_sock(sk); | ||
| 1139 | return sent ? sent : rc; | 1071 | return sent ? sent : rc; | 
| 1140 | } | 1072 | } | 
| 1141 | 1073 | ||
| 1142 | /** | 1074 | /** | 
| 1143 | * tipc_send_packet - send a connection-oriented message | 1075 | * tipc_send_packet - send a connection-oriented message | 
| 1144 | * @iocb: if NULL, indicates that socket lock is already held | ||
| 1145 | * @sock: socket structure | 1076 | * @sock: socket structure | 
| 1146 | * @m: message to send | 1077 | * @m: message to send | 
| 1147 | * @dsz: length of data to be transmitted | 1078 | * @dsz: length of data to be transmitted | 
| @@ -1150,13 +1081,12 @@ exit: | |||
| 1150 | * | 1081 | * | 
| 1151 | * Returns the number of bytes sent on success, or errno otherwise | 1082 | * Returns the number of bytes sent on success, or errno otherwise | 
| 1152 | */ | 1083 | */ | 
| 1153 | static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, | 1084 | static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) | 
| 1154 | struct msghdr *m, size_t dsz) | ||
| 1155 | { | 1085 | { | 
| 1156 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 1086 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 
| 1157 | return -EMSGSIZE; | 1087 | return -EMSGSIZE; | 
| 1158 | 1088 | ||
| 1159 | return tipc_send_stream(iocb, sock, m, dsz); | 1089 | return tipc_send_stream(sock, m, dsz); | 
| 1160 | } | 1090 | } | 
| 1161 | 1091 | ||
| 1162 | /* tipc_sk_finish_conn - complete the setup of a connection | 1092 | /* tipc_sk_finish_conn - complete the setup of a connection | 
| @@ -1317,12 +1247,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
| 1317 | err = 0; | 1247 | err = 0; | 
| 1318 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 1248 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 
| 1319 | break; | 1249 | break; | 
| 1320 | err = sock_intr_errno(timeo); | ||
| 1321 | if (signal_pending(current)) | ||
| 1322 | break; | ||
| 1323 | err = -EAGAIN; | 1250 | err = -EAGAIN; | 
| 1324 | if (!timeo) | 1251 | if (!timeo) | 
| 1325 | break; | 1252 | break; | 
| 1253 | err = sock_intr_errno(timeo); | ||
| 1254 | if (signal_pending(current)) | ||
| 1255 | break; | ||
| 1326 | } | 1256 | } | 
| 1327 | finish_wait(sk_sleep(sk), &wait); | 1257 | finish_wait(sk_sleep(sk), &wait); | 
| 1328 | *timeop = timeo; | 1258 | *timeop = timeo; | 
| @@ -1331,7 +1261,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
| 1331 | 1261 | ||
| 1332 | /** | 1262 | /** | 
| 1333 | * tipc_recvmsg - receive packet-oriented message | 1263 | * tipc_recvmsg - receive packet-oriented message | 
| 1334 | * @iocb: (unused) | ||
| 1335 | * @m: descriptor for message info | 1264 | * @m: descriptor for message info | 
| 1336 | * @buf_len: total size of user buffer area | 1265 | * @buf_len: total size of user buffer area | 
| 1337 | * @flags: receive flags | 1266 | * @flags: receive flags | 
| @@ -1341,8 +1270,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
| 1341 | * | 1270 | * | 
| 1342 | * Returns size of returned message data, errno otherwise | 1271 | * Returns size of returned message data, errno otherwise | 
| 1343 | */ | 1272 | */ | 
| 1344 | static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, | 1273 | static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, | 
| 1345 | struct msghdr *m, size_t buf_len, int flags) | 1274 | int flags) | 
| 1346 | { | 1275 | { | 
| 1347 | struct sock *sk = sock->sk; | 1276 | struct sock *sk = sock->sk; | 
| 1348 | struct tipc_sock *tsk = tipc_sk(sk); | 1277 | struct tipc_sock *tsk = tipc_sk(sk); | 
| @@ -1426,7 +1355,6 @@ exit: | |||
| 1426 | 1355 | ||
| 1427 | /** | 1356 | /** | 
| 1428 | * tipc_recv_stream - receive stream-oriented data | 1357 | * tipc_recv_stream - receive stream-oriented data | 
| 1429 | * @iocb: (unused) | ||
| 1430 | * @m: descriptor for message info | 1358 | * @m: descriptor for message info | 
| 1431 | * @buf_len: total size of user buffer area | 1359 | * @buf_len: total size of user buffer area | 
| 1432 | * @flags: receive flags | 1360 | * @flags: receive flags | 
| @@ -1436,8 +1364,8 @@ exit: | |||
| 1436 | * | 1364 | * | 
| 1437 | * Returns size of returned message data, errno otherwise | 1365 | * Returns size of returned message data, errno otherwise | 
| 1438 | */ | 1366 | */ | 
| 1439 | static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, | 1367 | static int tipc_recv_stream(struct socket *sock, struct msghdr *m, | 
| 1440 | struct msghdr *m, size_t buf_len, int flags) | 1368 | size_t buf_len, int flags) | 
| 1441 | { | 1369 | { | 
| 1442 | struct sock *sk = sock->sk; | 1370 | struct sock *sk = sock->sk; | 
| 1443 | struct tipc_sock *tsk = tipc_sk(sk); | 1371 | struct tipc_sock *tsk = tipc_sk(sk); | 
| @@ -1836,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | |||
| 1836 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) | 1764 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) | 
| 1837 | { | 1765 | { | 
| 1838 | u32 dnode, dport = 0; | 1766 | u32 dnode, dport = 0; | 
| 1839 | int err = -TIPC_ERR_NO_PORT; | 1767 | int err; | 
| 1840 | struct sk_buff *skb; | 1768 | struct sk_buff *skb; | 
| 1841 | struct tipc_sock *tsk; | 1769 | struct tipc_sock *tsk; | 
| 1842 | struct tipc_net *tn; | 1770 | struct tipc_net *tn; | 
| 1843 | struct sock *sk; | 1771 | struct sock *sk; | 
| 1844 | 1772 | ||
| 1845 | while (skb_queue_len(inputq)) { | 1773 | while (skb_queue_len(inputq)) { | 
| 1774 | err = -TIPC_ERR_NO_PORT; | ||
| 1846 | skb = NULL; | 1775 | skb = NULL; | 
| 1847 | dport = tipc_skb_peek_port(inputq, dport); | 1776 | dport = tipc_skb_peek_port(inputq, dport); | 
| 1848 | tsk = tipc_sk_lookup(net, dport); | 1777 | tsk = tipc_sk_lookup(net, dport); | 
| @@ -1909,17 +1838,26 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, | |||
| 1909 | int destlen, int flags) | 1838 | int destlen, int flags) | 
| 1910 | { | 1839 | { | 
| 1911 | struct sock *sk = sock->sk; | 1840 | struct sock *sk = sock->sk; | 
| 1841 | struct tipc_sock *tsk = tipc_sk(sk); | ||
| 1912 | struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; | 1842 | struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; | 
| 1913 | struct msghdr m = {NULL,}; | 1843 | struct msghdr m = {NULL,}; | 
| 1914 | long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout; | 1844 | long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; | 
| 1915 | socket_state previous; | 1845 | socket_state previous; | 
| 1916 | int res; | 1846 | int res = 0; | 
| 1917 | 1847 | ||
| 1918 | lock_sock(sk); | 1848 | lock_sock(sk); | 
| 1919 | 1849 | ||
| 1920 | /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ | 1850 | /* DGRAM/RDM connect(), just save the destaddr */ | 
| 1921 | if (sock->state == SS_READY) { | 1851 | if (sock->state == SS_READY) { | 
| 1922 | res = -EOPNOTSUPP; | 1852 | if (dst->family == AF_UNSPEC) { | 
| 1853 | memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc)); | ||
| 1854 | tsk->connected = 0; | ||
| 1855 | } else if (destlen != sizeof(struct sockaddr_tipc)) { | ||
| 1856 | res = -EINVAL; | ||
| 1857 | } else { | ||
| 1858 | memcpy(&tsk->remote, dest, destlen); | ||
| 1859 | tsk->connected = 1; | ||
| 1860 | } | ||
| 1923 | goto exit; | 1861 | goto exit; | 
| 1924 | } | 1862 | } | 
| 1925 | 1863 | ||
| @@ -1947,7 +1885,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, | |||
| 1947 | if (!timeout) | 1885 | if (!timeout) | 
| 1948 | m.msg_flags = MSG_DONTWAIT; | 1886 | m.msg_flags = MSG_DONTWAIT; | 
| 1949 | 1887 | ||
| 1950 | res = tipc_sendmsg(NULL, sock, &m, 0); | 1888 | res = __tipc_sendmsg(sock, &m, 0); | 
| 1951 | if ((res < 0) && (res != -EWOULDBLOCK)) | 1889 | if ((res < 0) && (res != -EWOULDBLOCK)) | 
| 1952 | goto exit; | 1890 | goto exit; | 
| 1953 | 1891 | ||
| @@ -2027,12 +1965,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) | |||
| 2027 | err = -EINVAL; | 1965 | err = -EINVAL; | 
| 2028 | if (sock->state != SS_LISTENING) | 1966 | if (sock->state != SS_LISTENING) | 
| 2029 | break; | 1967 | break; | 
| 2030 | err = sock_intr_errno(timeo); | ||
| 2031 | if (signal_pending(current)) | ||
| 2032 | break; | ||
| 2033 | err = -EAGAIN; | 1968 | err = -EAGAIN; | 
| 2034 | if (!timeo) | 1969 | if (!timeo) | 
| 2035 | break; | 1970 | break; | 
| 1971 | err = sock_intr_errno(timeo); | ||
| 1972 | if (signal_pending(current)) | ||
| 1973 | break; | ||
| 2036 | } | 1974 | } | 
| 2037 | finish_wait(sk_sleep(sk), &wait); | 1975 | finish_wait(sk_sleep(sk), &wait); | 
| 2038 | return err; | 1976 | return err; | 
| @@ -2103,7 +2041,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) | |||
| 2103 | struct msghdr m = {NULL,}; | 2041 | struct msghdr m = {NULL,}; | 
| 2104 | 2042 | ||
| 2105 | tsk_advance_rx_queue(sk); | 2043 | tsk_advance_rx_queue(sk); | 
| 2106 | tipc_send_packet(NULL, new_sock, &m, 0); | 2044 | __tipc_send_stream(new_sock, &m, 0); | 
| 2107 | } else { | 2045 | } else { | 
| 2108 | __skb_dequeue(&sk->sk_receive_queue); | 2046 | __skb_dequeue(&sk->sk_receive_queue); | 
| 2109 | __skb_queue_head(&new_sk->sk_receive_queue, buf); | 2047 | __skb_queue_head(&new_sk->sk_receive_queue, buf); | 
| @@ -2154,7 +2092,6 @@ restart: | |||
| 2154 | TIPC_CONN_SHUTDOWN)) | 2092 | TIPC_CONN_SHUTDOWN)) | 
| 2155 | tipc_link_xmit_skb(net, skb, dnode, | 2093 | tipc_link_xmit_skb(net, skb, dnode, | 
| 2156 | tsk->portid); | 2094 | tsk->portid); | 
| 2157 | tipc_node_remove_conn(net, dnode, tsk->portid); | ||
| 2158 | } else { | 2095 | } else { | 
| 2159 | dnode = tsk_peer_node(tsk); | 2096 | dnode = tsk_peer_node(tsk); | 
| 2160 | 2097 | ||
| @@ -2312,7 +2249,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | |||
| 2312 | struct tipc_sock *tsk; | 2249 | struct tipc_sock *tsk; | 
| 2313 | 2250 | ||
| 2314 | rcu_read_lock(); | 2251 | rcu_read_lock(); | 
| 2315 | tsk = rhashtable_lookup(&tn->sk_rht, &portid); | 2252 | tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); | 
| 2316 | if (tsk) | 2253 | if (tsk) | 
| 2317 | sock_hold(&tsk->sk); | 2254 | sock_hold(&tsk->sk); | 
| 2318 | rcu_read_unlock(); | 2255 | rcu_read_unlock(); | 
| @@ -2334,7 +2271,8 @@ static int tipc_sk_insert(struct tipc_sock *tsk) | |||
| 2334 | portid = TIPC_MIN_PORT; | 2271 | portid = TIPC_MIN_PORT; | 
| 2335 | tsk->portid = portid; | 2272 | tsk->portid = portid; | 
| 2336 | sock_hold(&tsk->sk); | 2273 | sock_hold(&tsk->sk); | 
| 2337 | if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node)) | 2274 | if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, | 
| 2275 | tsk_rht_params)) | ||
| 2338 | return 0; | 2276 | return 0; | 
| 2339 | sock_put(&tsk->sk); | 2277 | sock_put(&tsk->sk); | 
| 2340 | } | 2278 | } | 
| @@ -2347,28 +2285,27 @@ static void tipc_sk_remove(struct tipc_sock *tsk) | |||
| 2347 | struct sock *sk = &tsk->sk; | 2285 | struct sock *sk = &tsk->sk; | 
| 2348 | struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); | 2286 | struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); | 
| 2349 | 2287 | ||
| 2350 | if (rhashtable_remove(&tn->sk_rht, &tsk->node)) { | 2288 | if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { | 
| 2351 | WARN_ON(atomic_read(&sk->sk_refcnt) == 1); | 2289 | WARN_ON(atomic_read(&sk->sk_refcnt) == 1); | 
| 2352 | __sock_put(sk); | 2290 | __sock_put(sk); | 
| 2353 | } | 2291 | } | 
| 2354 | } | 2292 | } | 
| 2355 | 2293 | ||
| 2294 | static const struct rhashtable_params tsk_rht_params = { | ||
| 2295 | .nelem_hint = 192, | ||
| 2296 | .head_offset = offsetof(struct tipc_sock, node), | ||
| 2297 | .key_offset = offsetof(struct tipc_sock, portid), | ||
| 2298 | .key_len = sizeof(u32), /* portid */ | ||
| 2299 | .max_size = 1048576, | ||
| 2300 | .min_size = 256, | ||
| 2301 | .automatic_shrinking = true, | ||
| 2302 | }; | ||
| 2303 | |||
| 2356 | int tipc_sk_rht_init(struct net *net) | 2304 | int tipc_sk_rht_init(struct net *net) | 
| 2357 | { | 2305 | { | 
| 2358 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 2306 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 
| 2359 | struct rhashtable_params rht_params = { | ||
| 2360 | .nelem_hint = 192, | ||
| 2361 | .head_offset = offsetof(struct tipc_sock, node), | ||
| 2362 | .key_offset = offsetof(struct tipc_sock, portid), | ||
| 2363 | .key_len = sizeof(u32), /* portid */ | ||
| 2364 | .hashfn = jhash, | ||
| 2365 | .max_shift = 20, /* 1M */ | ||
| 2366 | .min_shift = 8, /* 256 */ | ||
| 2367 | .grow_decision = rht_grow_above_75, | ||
| 2368 | .shrink_decision = rht_shrink_below_30, | ||
| 2369 | }; | ||
| 2370 | 2307 | ||
| 2371 | return rhashtable_init(&tn->sk_rht, &rht_params); | 2308 | return rhashtable_init(&tn->sk_rht, &tsk_rht_params); | 
| 2372 | } | 2309 | } | 
| 2373 | 2310 | ||
| 2374 | void tipc_sk_rht_destroy(struct net *net) | 2311 | void tipc_sk_rht_destroy(struct net *net) | 
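
The socket hash table moves from the old per-init rhashtable_params (jhash, max_shift/min_shift, explicit grow/shrink decisions) to a single const tsk_rht_params using max_size/min_size plus automatic_shrinking; 1048576 and 256 are the old shifts of 20 and 8 expressed directly as sizes. All call sites switch to the _fast variants that take the params object explicitly. A minimal sketch of the resulting lookup pattern (it mirrors tipc_sk_lookup() above; the helper name is only illustrative):

    /* Illustrative only: lookups now pass the shared const params. */
    static struct tipc_sock *sk_by_portid(struct tipc_net *tn, u32 portid)
    {
            return rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
    }
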
| @@ -2611,12 +2548,6 @@ static struct proto tipc_proto = { | |||
| 2611 | .sysctl_rmem = sysctl_tipc_rmem | 2548 | .sysctl_rmem = sysctl_tipc_rmem | 
| 2612 | }; | 2549 | }; | 
| 2613 | 2550 | ||
| 2614 | static struct proto tipc_proto_kern = { | ||
| 2615 | .name = "TIPC", | ||
| 2616 | .obj_size = sizeof(struct tipc_sock), | ||
| 2617 | .sysctl_rmem = sysctl_tipc_rmem | ||
| 2618 | }; | ||
| 2619 | |||
| 2620 | /** | 2551 | /** | 
| 2621 | * tipc_socket_init - initialize TIPC socket interface | 2552 | * tipc_socket_init - initialize TIPC socket interface | 
| 2622 | * | 2553 | * | 
| diff --git a/net/tipc/socket.h b/net/tipc/socket.h index 238f1b7bd9bd..bf6551389522 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
| @@ -44,10 +44,6 @@ | |||
| 44 | SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) | 44 | SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) | 
| 45 | int tipc_socket_init(void); | 45 | int tipc_socket_init(void); | 
| 46 | void tipc_socket_stop(void); | 46 | void tipc_socket_stop(void); | 
| 47 | int tipc_sock_create_local(struct net *net, int type, struct socket **res); | ||
| 48 | void tipc_sock_release_local(struct socket *sock); | ||
| 49 | int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, | ||
| 50 | int flags); | ||
| 51 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq); | 47 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq); | 
| 52 | void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, | 48 | void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, | 
| 53 | struct sk_buff_head *inputq); | 49 | struct sk_buff_head *inputq); | 
| diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 72c339e432aa..1c147c869c2e 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
| @@ -162,19 +162,6 @@ static void subscr_del(struct tipc_subscription *sub) | |||
| 162 | atomic_dec(&tn->subscription_count); | 162 | atomic_dec(&tn->subscription_count); | 
| 163 | } | 163 | } | 
| 164 | 164 | ||
| 165 | /** | ||
| 166 | * subscr_terminate - terminate communication with a subscriber | ||
| 167 | * | ||
| 168 | * Note: Must call it in process context since it might sleep. | ||
| 169 | */ | ||
| 170 | static void subscr_terminate(struct tipc_subscription *sub) | ||
| 171 | { | ||
| 172 | struct tipc_subscriber *subscriber = sub->subscriber; | ||
| 173 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
| 174 | |||
| 175 | tipc_conn_terminate(tn->topsrv, subscriber->conid); | ||
| 176 | } | ||
| 177 | |||
| 178 | static void subscr_release(struct tipc_subscriber *subscriber) | 165 | static void subscr_release(struct tipc_subscriber *subscriber) | 
| 179 | { | 166 | { | 
| 180 | struct tipc_subscription *sub; | 167 | struct tipc_subscription *sub; | 
| @@ -312,16 +299,14 @@ static void subscr_conn_msg_event(struct net *net, int conid, | |||
| 312 | { | 299 | { | 
| 313 | struct tipc_subscriber *subscriber = usr_data; | 300 | struct tipc_subscriber *subscriber = usr_data; | 
| 314 | struct tipc_subscription *sub = NULL; | 301 | struct tipc_subscription *sub = NULL; | 
| 302 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
| 315 | 303 | ||
| 316 | spin_lock_bh(&subscriber->lock); | 304 | spin_lock_bh(&subscriber->lock); | 
| 317 | if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, | 305 | subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub); | 
| 318 | &sub) < 0) { | ||
| 319 | spin_unlock_bh(&subscriber->lock); | ||
| 320 | subscr_terminate(sub); | ||
| 321 | return; | ||
| 322 | } | ||
| 323 | if (sub) | 306 | if (sub) | 
| 324 | tipc_nametbl_subscribe(sub); | 307 | tipc_nametbl_subscribe(sub); | 
| 308 | else | ||
| 309 | tipc_conn_terminate(tn->topsrv, subscriber->conid); | ||
| 325 | spin_unlock_bh(&subscriber->lock); | 310 | spin_unlock_bh(&subscriber->lock); | 
| 326 | } | 311 | } | 
| 327 | 312 | ||
| diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c new file mode 100644 index 000000000000..66deebc66aa1 --- /dev/null +++ b/net/tipc/udp_media.c | |||
| @@ -0,0 +1,448 @@ | |||
| 1 | /* net/tipc/udp_media.c: IP bearer support for TIPC | ||
| 2 | * | ||
| 3 | * Copyright (c) 2015, Ericsson AB | ||
| 4 | * All rights reserved. | ||
| 5 | * | ||
| 6 | * Redistribution and use in source and binary forms, with or without | ||
| 7 | * modification, are permitted provided that the following conditions are met: | ||
| 8 | * | ||
| 9 | * 1. Redistributions of source code must retain the above copyright | ||
| 10 | * notice, this list of conditions and the following disclaimer. | ||
| 11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 12 | * notice, this list of conditions and the following disclaimer in the | ||
| 13 | * documentation and/or other materials provided with the distribution. | ||
| 14 | * 3. Neither the names of the copyright holders nor the names of its | ||
| 15 | * contributors may be used to endorse or promote products derived from | ||
| 16 | * this software without specific prior written permission. | ||
| 17 | * | ||
| 18 | * Alternatively, this software may be distributed under the terms of the | ||
| 19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 20 | * Software Foundation. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 32 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/socket.h> | ||
| 36 | #include <linux/ip.h> | ||
| 37 | #include <linux/udp.h> | ||
| 38 | #include <linux/inet.h> | ||
| 39 | #include <linux/inetdevice.h> | ||
| 40 | #include <linux/igmp.h> | ||
| 41 | #include <linux/kernel.h> | ||
| 42 | #include <linux/workqueue.h> | ||
| 43 | #include <linux/list.h> | ||
| 44 | #include <net/sock.h> | ||
| 45 | #include <net/ip.h> | ||
| 46 | #include <net/udp_tunnel.h> | ||
| 47 | #include <net/addrconf.h> | ||
| 48 | #include <linux/tipc_netlink.h> | ||
| 49 | #include "core.h" | ||
| 50 | #include "bearer.h" | ||
| 51 | |||
| 52 | /* IANA assigned UDP port */ | ||
| 53 | #define UDP_PORT_DEFAULT 6118 | ||
| 54 | |||
| 55 | static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { | ||
| 56 | [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, | ||
| 57 | [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY, | ||
| 58 | .len = sizeof(struct sockaddr_storage)}, | ||
| 59 | [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY, | ||
| 60 | .len = sizeof(struct sockaddr_storage)}, | ||
| 61 | }; | ||
| 62 | |||
| 63 | /** | ||
| 64 | * struct udp_media_addr - IP/UDP addressing information | ||
| 65 | * | ||
| 66 | * This is the bearer level originating address used in neighbor discovery | ||
| 67 | * messages, and all fields should be in network byte order | ||
| 68 | */ | ||
| 69 | struct udp_media_addr { | ||
| 70 | __be16 proto; | ||
| 71 | __be16 udp_port; | ||
| 72 | union { | ||
| 73 | struct in_addr ipv4; | ||
| 74 | struct in6_addr ipv6; | ||
| 75 | }; | ||
| 76 | }; | ||
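
struct udp_media_addr is the bearer-level address carried in neighbor discovery messages, with every field in network byte order. A small sketch of filling in an IPv4 instance, consistent with how parse_options() below populates it (the helper name is only illustrative):

    /* Illustrative only: all stored values are big-endian on the wire. */
    static void udp_media_addr_fill_ipv4(struct udp_media_addr *ua,
                                         __be32 addr, u16 host_order_port)
    {
            memset(ua, 0, sizeof(*ua));
            ua->proto = htons(ETH_P_IP);
            ua->udp_port = htons(host_order_port);
            ua->ipv4.s_addr = addr;         /* already __be32 */
    }
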
| 77 | |||
| 78 | /** | ||
| 79 | * struct udp_bearer - ip/udp bearer data structure | ||
| 80 | * @bearer: associated generic tipc bearer | ||
| 81 | * @ubsock: bearer associated socket | ||
| 82 | * @ifindex: local address scope | ||
| 83 | * @work: used to schedule deferred work on a bearer | ||
| 84 | */ | ||
| 85 | struct udp_bearer { | ||
| 86 | struct tipc_bearer __rcu *bearer; | ||
| 87 | struct socket *ubsock; | ||
| 88 | u32 ifindex; | ||
| 89 | struct work_struct work; | ||
| 90 | }; | ||
| 91 | |||
| 92 | /* udp_media_addr_set - convert a ip/udp address to a TIPC media address */ | ||
| 93 | static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, | ||
| 94 | struct udp_media_addr *ua) | ||
| 95 | { | ||
| 96 | memset(addr, 0, sizeof(struct tipc_media_addr)); | ||
| 97 | addr->media_id = TIPC_MEDIA_TYPE_UDP; | ||
| 98 | memcpy(addr->value, ua, sizeof(struct udp_media_addr)); | ||
| 99 | if (ntohs(ua->proto) == ETH_P_IP) { | ||
| 100 | if (ipv4_is_multicast(ua->ipv4.s_addr)) | ||
| 101 | addr->broadcast = 1; | ||
| 102 | } else if (ntohs(ua->proto) == ETH_P_IPV6) { | ||
| 103 | if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST) | ||
| 104 | addr->broadcast = 1; | ||
| 105 | } else { | ||
| 106 | pr_err("Invalid UDP media address\n"); | ||
| 107 | } | ||
| 108 | } | ||
| 109 | |||
| 110 | /* tipc_udp_addr2str - convert ip/udp address to string */ | ||
| 111 | static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size) | ||
| 112 | { | ||
| 113 | struct udp_media_addr *ua = (struct udp_media_addr *)&a->value; | ||
| 114 | |||
| 115 | if (ntohs(ua->proto) == ETH_P_IP) | ||
| 116 | snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port)); | ||
| 117 | else if (ntohs(ua->proto) == ETH_P_IPV6) | ||
| 118 | snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port)); | ||
| 119 | else | ||
| 120 | pr_err("Invalid UDP media address\n"); | ||
| 121 | return 0; | ||
| 122 | } | ||
| 123 | |||
| 124 | /* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */ | ||
| 125 | static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a, | ||
| 126 | char *msg) | ||
| 127 | { | ||
| 128 | struct udp_media_addr *ua; | ||
| 129 | |||
| 130 | ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET); | ||
| 131 | if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP) | ||
| 132 | return -EINVAL; | ||
| 133 | tipc_udp_media_addr_set(a, ua); | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | /* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */ | ||
| 138 | static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a) | ||
| 139 | { | ||
| 140 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); | ||
| 141 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP; | ||
| 142 | memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value, | ||
| 143 | sizeof(struct udp_media_addr)); | ||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | /* tipc_send_msg - enqueue a send request */ | ||
| 148 | static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | ||
| 149 | struct tipc_bearer *b, | ||
| 150 | struct tipc_media_addr *dest) | ||
| 151 | { | ||
| 152 | int ttl, err = 0; | ||
| 153 | struct udp_bearer *ub; | ||
| 154 | struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value; | ||
| 155 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; | ||
| 156 | struct sk_buff *clone; | ||
| 157 | struct rtable *rt; | ||
| 158 | |||
| 159 | clone = skb_clone(skb, GFP_ATOMIC); | ||
| 160 | skb_set_inner_protocol(clone, htons(ETH_P_TIPC)); | ||
| 161 | ub = rcu_dereference_rtnl(b->media_ptr); | ||
| 162 | if (!ub) { | ||
| 163 | err = -ENODEV; | ||
| 164 | goto tx_error; | ||
| 165 | } | ||
| 166 | if (dst->proto == htons(ETH_P_IP)) { | ||
| 167 | struct flowi4 fl = { | ||
| 168 | .daddr = dst->ipv4.s_addr, | ||
| 169 | .saddr = src->ipv4.s_addr, | ||
| 170 | .flowi4_mark = clone->mark, | ||
| 171 | .flowi4_proto = IPPROTO_UDP | ||
| 172 | }; | ||
| 173 | rt = ip_route_output_key(net, &fl); | ||
| 174 | if (IS_ERR(rt)) { | ||
| 175 | err = PTR_ERR(rt); | ||
| 176 | goto tx_error; | ||
| 177 | } | ||
| 178 | ttl = ip4_dst_hoplimit(&rt->dst); | ||
| 179 | err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone, | ||
| 180 | src->ipv4.s_addr, | ||
| 181 | dst->ipv4.s_addr, 0, ttl, 0, | ||
| 182 | src->udp_port, dst->udp_port, | ||
| 183 | false, true); | ||
| 184 | if (err < 0) { | ||
| 185 | ip_rt_put(rt); | ||
| 186 | goto tx_error; | ||
| 187 | } | ||
| 188 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 189 | } else { | ||
| 190 | struct dst_entry *ndst; | ||
| 191 | struct flowi6 fl6 = { | ||
| 192 | .flowi6_oif = ub->ifindex, | ||
| 193 | .daddr = dst->ipv6, | ||
| 194 | .saddr = src->ipv6, | ||
| 195 | .flowi6_proto = IPPROTO_UDP | ||
| 196 | }; | ||
| 197 | err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6); | ||
| 198 | if (err) | ||
| 199 | goto tx_error; | ||
| 200 | ttl = ip6_dst_hoplimit(ndst); | ||
| 201 | err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone, | ||
| 202 | ndst->dev, &src->ipv6, | ||
| 203 | &dst->ipv6, 0, ttl, src->udp_port, | ||
| 204 | dst->udp_port, false); | ||
| 205 | #endif | ||
| 206 | } | ||
| 207 | return err; | ||
| 208 | |||
| 209 | tx_error: | ||
| 210 | kfree_skb(clone); | ||
| 211 | return err; | ||
| 212 | } | ||
| 213 | |||
| 214 | /* tipc_udp_recv - read data from bearer socket */ | ||
| 215 | static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) | ||
| 216 | { | ||
| 217 | struct udp_bearer *ub; | ||
| 218 | struct tipc_bearer *b; | ||
| 219 | |||
| 220 | ub = rcu_dereference_sk_user_data(sk); | ||
| 221 | if (!ub) { | ||
| 222 | pr_err_ratelimited("Failed to get UDP bearer reference"); | ||
| 223 | kfree_skb(skb); | ||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | skb_pull(skb, sizeof(struct udphdr)); | ||
| 228 | rcu_read_lock(); | ||
| 229 | b = rcu_dereference_rtnl(ub->bearer); | ||
| 230 | |||
| 231 | if (b) { | ||
| 232 | tipc_rcv(sock_net(sk), skb, b); | ||
| 233 | rcu_read_unlock(); | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | rcu_read_unlock(); | ||
| 237 | kfree_skb(skb); | ||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | |||
| 241 | static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) | ||
| 242 | { | ||
| 243 | int err = 0; | ||
| 244 | struct ip_mreqn mreqn; | ||
| 245 | struct sock *sk = ub->ubsock->sk; | ||
| 246 | |||
| 247 | if (ntohs(remote->proto) == ETH_P_IP) { | ||
| 248 | if (!ipv4_is_multicast(remote->ipv4.s_addr)) | ||
| 249 | return 0; | ||
| 250 | mreqn.imr_multiaddr = remote->ipv4; | ||
| 251 | mreqn.imr_ifindex = ub->ifindex; | ||
| 252 | err = ip_mc_join_group(sk, &mreqn); | ||
| 253 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 254 | } else { | ||
| 255 | if (!ipv6_addr_is_multicast(&remote->ipv6)) | ||
| 256 | return 0; | ||
| 257 | err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex, | ||
| 258 | &remote->ipv6); | ||
| 259 | #endif | ||
| 260 | } | ||
| 261 | return err; | ||
| 262 | } | ||
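enable_mcast() joins a group only when the configured remote address is itself a multicast address; with a unicast peer the bearer runs point-to-point and the function is a no-op. A small userspace sketch of the same classification (IPv4 224.0.0.0/4, IPv6 ff00::/8), matching what ipv4_is_multicast()/ipv6_addr_is_multicast() test in the kernel:

#include <stdbool.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* True if an IPv4 remote address would trigger the ip_mc_join_group() above. */
static bool remote_is_mcast_v4(struct in_addr a)
{
	return IN_MULTICAST(ntohl(a.s_addr));      /* 224.0.0.0/4 */
}

/* True if an IPv6 remote address would trigger the ipv6_sock_mc_join() above. */
static bool remote_is_mcast_v6(const struct in6_addr *a)
{
	return IN6_IS_ADDR_MULTICAST(a);           /* ff00::/8 */
}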
| 263 | |||
| 264 | /** | ||
| 265 | * parse_options - build local/remote addresses from configuration | ||
| 266 | * @attrs: netlink config data | ||
| 267 | * @ub: UDP bearer instance | ||
| 268 | * @local: local bearer IP address/port | ||
| 269 | * @remote: peer or multicast IP/port | ||
| 270 | */ | ||
| 271 | static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub, | ||
| 272 | struct udp_media_addr *local, | ||
| 273 | struct udp_media_addr *remote) | ||
| 274 | { | ||
| 275 | struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; | ||
| 276 | struct sockaddr_storage *sa_local, *sa_remote; | ||
| 277 | |||
| 278 | if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) | ||
| 279 | goto err; | ||
| 280 | if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, | ||
| 281 | attrs[TIPC_NLA_BEARER_UDP_OPTS], | ||
| 282 | tipc_nl_udp_policy)) | ||
| 283 | goto err; | ||
| 284 | if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) { | ||
| 285 | sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]); | ||
| 286 | sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]); | ||
| 287 | } else { | ||
| 288 | err: | ||
| 289 | pr_err("Invalid UDP bearer configuration\n"); | ||
| 290 | return -EINVAL; | ||
| 291 | } | ||
| 292 | if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) { | ||
| 293 | struct sockaddr_in *ip4; | ||
| 294 | |||
| 295 | ip4 = (struct sockaddr_in *)sa_local; | ||
| 296 | local->proto = htons(ETH_P_IP); | ||
| 297 | local->udp_port = ip4->sin_port; | ||
| 298 | local->ipv4.s_addr = ip4->sin_addr.s_addr; | ||
| 299 | |||
| 300 | ip4 = (struct sockaddr_in *)sa_remote; | ||
| 301 | remote->proto = htons(ETH_P_IP); | ||
| 302 | remote->udp_port = ip4->sin_port; | ||
| 303 | remote->ipv4.s_addr = ip4->sin_addr.s_addr; | ||
| 304 | return 0; | ||
| 305 | |||
| 306 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 307 | } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) { | ||
| 308 | struct sockaddr_in6 *ip6; | ||
| 309 | |||
| 310 | ip6 = (struct sockaddr_in6 *)sa_local; | ||
| 311 | local->proto = htons(ETH_P_IPV6); | ||
| 312 | local->udp_port = ip6->sin6_port; | ||
| 313 | local->ipv6 = ip6->sin6_addr; | ||
| 314 | ub->ifindex = ip6->sin6_scope_id; | ||
| 315 | |||
| 316 | ip6 = (struct sockaddr_in6 *)sa_remote; | ||
| 317 | remote->proto = htons(ETH_P_IPV6); | ||
| 318 | remote->udp_port = ip6->sin6_port; | ||
| 319 | remote->ipv6 = ip6->sin6_addr; | ||
| 320 | return 0; | ||
| 321 | #endif | ||
| 322 | } | ||
| 323 | return -EADDRNOTAVAIL; | ||
| 324 | } | ||
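parse_options() expects TIPC_NLA_UDP_LOCAL and TIPC_NLA_UDP_REMOTE to each carry a sockaddr_storage-sized blob, and both must be of the same address family. A hedged sketch of how a configuration tool might fill the two structures before nesting them under TIPC_NLA_BEARER_UDP_OPTS; the netlink plumbing itself is omitted, and the addresses and multicast group below are purely illustrative.

#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/*
 * Sketch: fill the local/remote sockaddrs that parse_options() reads from
 * TIPC_NLA_UDP_LOCAL / TIPC_NLA_UDP_REMOTE.  Addresses and group are
 * illustrative values, not defaults taken from this patch.
 */
static void fill_udp_bearer_addrs(struct sockaddr_storage *local,
				  struct sockaddr_storage *remote)
{
	struct sockaddr_in *l = (struct sockaddr_in *)local;
	struct sockaddr_in *r = (struct sockaddr_in *)remote;

	memset(local, 0, sizeof(*local));
	memset(remote, 0, sizeof(*remote));

	l->sin_family = AF_INET;
	l->sin_port   = htons(6118);                       /* 6118 = registered TIPC port */
	inet_pton(AF_INET, "192.168.1.10", &l->sin_addr);  /* local interface address */

	r->sin_family = AF_INET;
	r->sin_port   = htons(6118);
	inet_pton(AF_INET, "228.0.23.22", &r->sin_addr);   /* multicast discovery group */
}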
| 325 | |||
| 326 | /** | ||
| 327 | * tipc_udp_enable - callback to create a new udp bearer instance | ||
| 328 | * @net: network namespace | ||
| 329 | * @b: pointer to generic tipc_bearer | ||
| 330 | * @attrs: netlink bearer configuration | ||
| 331 | * | ||
| 332 | * Validate the bearer parameters and initialize the UDP bearer. | ||
| 333 | * rtnl_lock must be held. | ||
| 334 | */ | ||
| 335 | static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, | ||
| 336 | struct nlattr *attrs[]) | ||
| 337 | { | ||
| 338 | int err = -EINVAL; | ||
| 339 | struct udp_bearer *ub; | ||
| 340 | struct udp_media_addr *remote; | ||
| 341 | struct udp_media_addr local = {0}; | ||
| 342 | struct udp_port_cfg udp_conf = {0}; | ||
| 343 | struct udp_tunnel_sock_cfg tuncfg = {NULL}; | ||
| 344 | |||
| 345 | ub = kzalloc(sizeof(*ub), GFP_ATOMIC); | ||
| 346 | if (!ub) | ||
| 347 | return -ENOMEM; | ||
| 348 | |||
| 349 | remote = (struct udp_media_addr *)&b->bcast_addr.value; | ||
| 350 | memset(remote, 0, sizeof(struct udp_media_addr)); | ||
| 351 | err = parse_options(attrs, ub, &local, remote); | ||
| 352 | if (err) | ||
| 353 | goto err; | ||
| 354 | |||
| 355 | b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP; | ||
| 356 | b->bcast_addr.broadcast = 1; | ||
| 357 | rcu_assign_pointer(b->media_ptr, ub); | ||
| 358 | rcu_assign_pointer(ub->bearer, b); | ||
| 359 | tipc_udp_media_addr_set(&b->addr, &local); | ||
| 360 | if (local.proto == htons(ETH_P_IP)) { | ||
| 361 | struct net_device *dev; | ||
| 362 | |||
| 363 | dev = __ip_dev_find(net, local.ipv4.s_addr, false); | ||
| 364 | if (!dev) { | ||
| 365 | err = -ENODEV; | ||
| 366 | goto err; | ||
| 367 | } | ||
| 368 | udp_conf.family = AF_INET; | ||
| 369 | udp_conf.local_ip.s_addr = htonl(INADDR_ANY); | ||
| 370 | udp_conf.use_udp_checksums = false; | ||
| 371 | ub->ifindex = dev->ifindex; | ||
| 372 | b->mtu = dev->mtu - sizeof(struct iphdr) | ||
| 373 | - sizeof(struct udphdr); | ||
| 374 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 375 | } else if (local.proto == htons(ETH_P_IPV6)) { | ||
| 376 | udp_conf.family = AF_INET6; | ||
| 377 | udp_conf.use_udp6_tx_checksums = true; | ||
| 378 | udp_conf.use_udp6_rx_checksums = true; | ||
| 379 | udp_conf.local_ip6 = in6addr_any; | ||
| 380 | b->mtu = 1280; | ||
| 381 | #endif | ||
| 382 | } else { | ||
| 383 | err = -EAFNOSUPPORT; | ||
| 384 | goto err; | ||
| 385 | } | ||
| 386 | udp_conf.local_udp_port = local.udp_port; | ||
| 387 | err = udp_sock_create(net, &udp_conf, &ub->ubsock); | ||
| 388 | if (err) | ||
| 389 | goto err; | ||
| 390 | tuncfg.sk_user_data = ub; | ||
| 391 | tuncfg.encap_type = 1; | ||
| 392 | tuncfg.encap_rcv = tipc_udp_recv; | ||
| 393 | tuncfg.encap_destroy = NULL; | ||
| 394 | setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); | ||
| 395 | |||
| 396 | if (enable_mcast(ub, remote)) | ||
| 397 | goto err; | ||
| 398 | return 0; | ||
| 399 | err: | ||
| 400 | kfree(ub); | ||
| 401 | return err; | ||
| 402 | } | ||
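Note how the bearer MTU is derived: for IPv4 it is the underlying device MTU minus the IP and UDP header overhead, while the IPv6 branch pins it to the 1280-byte IPv6 minimum MTU. For a plain 1500-byte Ethernet device that leaves 1472 bytes of TIPC payload per packet:

#include <stdio.h>

/* Worked example of the IPv4 bearer MTU computed in tipc_udp_enable(). */
int main(void)
{
	unsigned int dev_mtu = 1500;   /* typical Ethernet device MTU */
	unsigned int ip_hdr  = 20;     /* sizeof(struct iphdr), no options */
	unsigned int udp_hdr = 8;      /* sizeof(struct udphdr) */

	printf("TIPC-over-UDP bearer MTU: %u\n", dev_mtu - ip_hdr - udp_hdr); /* 1472 */
	return 0;
}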
| 403 | |||
| 404 | /* cleanup_bearer - break the socket/bearer association */ | ||
| 405 | static void cleanup_bearer(struct work_struct *work) | ||
| 406 | { | ||
| 407 | struct udp_bearer *ub = container_of(work, struct udp_bearer, work); | ||
| 408 | |||
| 409 | if (ub->ubsock) | ||
| 410 | udp_tunnel_sock_release(ub->ubsock); | ||
| 411 | synchronize_net(); | ||
| 412 | kfree(ub); | ||
| 413 | } | ||
| 414 | |||
| 415 | /* tipc_udp_disable - detach bearer from socket */ | ||
| 416 | static void tipc_udp_disable(struct tipc_bearer *b) | ||
| 417 | { | ||
| 418 | struct udp_bearer *ub; | ||
| 419 | |||
| 420 | ub = rcu_dereference_rtnl(b->media_ptr); | ||
| 421 | if (!ub) { | ||
| 422 | pr_err("UDP bearer instance not found\n"); | ||
| 423 | return; | ||
| 424 | } | ||
| 425 | if (ub->ubsock) | ||
| 426 | sock_set_flag(ub->ubsock->sk, SOCK_DEAD); | ||
| 427 | RCU_INIT_POINTER(b->media_ptr, NULL); | ||
| 428 | RCU_INIT_POINTER(ub->bearer, NULL); | ||
| 429 | |||
| 430 | /* sock_release need to be done outside of rtnl lock */ | ||
| 431 | INIT_WORK(&ub->work, cleanup_bearer); | ||
| 432 | schedule_work(&ub->work); | ||
| 433 | } | ||
| 434 | |||
| 435 | struct tipc_media udp_media_info = { | ||
| 436 | .send_msg = tipc_udp_send_msg, | ||
| 437 | .enable_media = tipc_udp_enable, | ||
| 438 | .disable_media = tipc_udp_disable, | ||
| 439 | .addr2str = tipc_udp_addr2str, | ||
| 440 | .addr2msg = tipc_udp_addr2msg, | ||
| 441 | .msg2addr = tipc_udp_msg2addr, | ||
| 442 | .priority = TIPC_DEF_LINK_PRI, | ||
| 443 | .tolerance = TIPC_DEF_LINK_TOL, | ||
| 444 | .window = TIPC_DEF_LINK_WIN, | ||
| 445 | .type_id = TIPC_MEDIA_TYPE_UDP, | ||
| 446 | .hwaddr_len = 0, | ||
| 447 | .name = "udp" | ||
| 448 | }; | ||
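udp_media_info is the only symbol the rest of the stack needs from this file; as with the Ethernet and InfiniBand media, it is presumably added to the media table in bearer.c under the new CONFIG_TIPC_MEDIA_UDP option. A sketch of that registration, following the existing pattern (the bearer.c hunk is not part of this file's diff):

/* bearer.c (sketch): registration of the new UDP media type */
static struct tipc_media *media_info_array[] = {
	&eth_media_info,
#ifdef CONFIG_TIPC_MEDIA_IB
	&ib_media_info,
#endif
#ifdef CONFIG_TIPC_MEDIA_UDP
	&udp_media_info,
#endif
	NULL
};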
