Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Makefile           4
-rw-r--r--  net/tipc/bearer.c          12
-rw-r--r--  net/tipc/bearer.h           2
-rw-r--r--  net/tipc/discover.c        19
-rw-r--r--  net/tipc/link.c           231
-rw-r--r--  net/tipc/link.h             2
-rw-r--r--  net/tipc/msg.c             78
-rw-r--r--  net/tipc/msg.h             12
-rw-r--r--  net/tipc/name_distr.c      18
-rw-r--r--  net/tipc/name_table.c       1
-rw-r--r--  net/tipc/name_table.h       1
-rw-r--r--  net/tipc/net.c             45
-rw-r--r--  net/tipc/net.h              2
-rw-r--r--  net/tipc/netlink_compat.c   9
-rw-r--r--  net/tipc/node.c           103
-rw-r--r--  net/tipc/node.h            13
-rw-r--r--  net/tipc/socket.c         491
-rw-r--r--  net/tipc/socket.h           4
-rw-r--r--  net/tipc/sysctl.c           8
-rw-r--r--  net/tipc/topsrv.c          14
-rw-r--r--  net/tipc/trace.c          206
-rw-r--r--  net/tipc/trace.h          431
-rw-r--r--  net/tipc/udp_media.c       27
23 files changed, 1484 insertions, 249 deletions
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index aca168f2abb1..c86aba0282af 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,7 +9,9 @@ tipc-y += addr.o bcast.o bearer.o \
9 core.o link.o discover.o msg.o \ 9 core.o link.o discover.o msg.o \
10 name_distr.o subscr.o monitor.o name_table.o net.o \ 10 name_distr.o subscr.o monitor.o name_table.o net.o \
11 netlink.o netlink_compat.o node.o socket.o eth_media.o \ 11 netlink.o netlink_compat.o node.o socket.o eth_media.o \
12 topsrv.o socket.o group.o 12 topsrv.o socket.o group.o trace.o
13
14CFLAGS_trace.o += -I$(src)
13 15
14tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o 16tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
15tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o 17tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
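
The new CFLAGS_trace.o += -I$(src) line follows the usual kernel tracepoint build convention: the one object that defines CREATE_TRACE_POINTS must be able to re-include the trace header via its TRACE_INCLUDE_PATH, which is given relative to the source directory. A minimal sketch of that part of the new trace.c (the file is added elsewhere in this series and also carries dump helpers, so this is only the tracepoint-definition boilerplate, not the whole file):

/*
 * Sketch only: defining CREATE_TRACE_POINTS exactly once turns the
 * TRACE_EVENT() declarations in trace.h into real tracepoint
 * definitions; the extra -I$(src) include path is what lets this
 * re-inclusion find trace.h by its TRACE_INCLUDE_PATH name.
 */
#define CREATE_TRACE_POINTS
#include "trace.h"
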
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 645c16052052..d27f30a9a01d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -43,6 +43,7 @@
43#include "bcast.h" 43#include "bcast.h"
44#include "netlink.h" 44#include "netlink.h"
45#include "udp_media.h" 45#include "udp_media.h"
46#include "trace.h"
46 47
47#define MAX_ADDR_STR 60 48#define MAX_ADDR_STR 60
48 49
@@ -99,7 +100,7 @@ static struct tipc_media *media_find_id(u8 type)
99/** 100/**
100 * tipc_media_addr_printf - record media address in print buffer 101 * tipc_media_addr_printf - record media address in print buffer
101 */ 102 */
102void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a) 103int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
103{ 104{
104 char addr_str[MAX_ADDR_STR]; 105 char addr_str[MAX_ADDR_STR];
105 struct tipc_media *m; 106 struct tipc_media *m;
@@ -114,9 +115,10 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
114 115
115 ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id); 116 ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
116 for (i = 0; i < sizeof(a->value); i++) 117 for (i = 0; i < sizeof(a->value); i++)
117 ret += scnprintf(buf - ret, len + ret, 118 ret += scnprintf(buf + ret, len - ret,
118 "-%02x", a->value[i]); 119 "-%x", a->value[i]);
119 } 120 }
121 return ret;
120} 122}
121 123
122/** 124/**
@@ -317,7 +319,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
317 res = tipc_disc_create(net, b, &b->bcast_addr, &skb); 319 res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
318 if (res) { 320 if (res) {
319 bearer_disable(net, b); 321 bearer_disable(net, b);
320 kfree(b);
321 errstr = "failed to create discoverer"; 322 errstr = "failed to create discoverer";
322 goto rejected; 323 goto rejected;
323 } 324 }
@@ -577,7 +578,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
577 rcu_dereference_rtnl(orig_dev->tipc_ptr); 578 rcu_dereference_rtnl(orig_dev->tipc_ptr);
578 if (likely(b && test_bit(0, &b->up) && 579 if (likely(b && test_bit(0, &b->up) &&
579 (skb->pkt_type <= PACKET_MULTICAST))) { 580 (skb->pkt_type <= PACKET_MULTICAST))) {
580 skb->next = NULL; 581 skb_mark_not_on_list(skb);
581 tipc_rcv(dev_net(b->pt.dev), skb, b); 582 tipc_rcv(dev_net(b->pt.dev), skb, b);
582 rcu_read_unlock(); 583 rcu_read_unlock();
583 return NET_RX_SUCCESS; 584 return NET_RX_SUCCESS;
@@ -607,6 +608,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
607 if (!b) 608 if (!b)
608 return NOTIFY_DONE; 609 return NOTIFY_DONE;
609 610
611 trace_tipc_l2_device_event(dev, b, evt);
610 switch (evt) { 612 switch (evt) {
611 case NETDEV_CHANGE: 613 case NETDEV_CHANGE:
612 if (netif_carrier_ok(dev) && netif_oper_up(dev)) { 614 if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
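
The tipc_media_addr_printf() change corrects an inverted scnprintf() accumulation (the old code stepped backwards with buf - ret and len + ret) and makes the function return the number of characters written, so the new dump helpers can chain it into their own buffers. A generic sketch of the corrected idiom, using a hypothetical helper name rather than TIPC code:

/* hex_dump_line() is a made-up name; the point is the idiom: advance the
 * output pointer by what has already been written, shrink the remaining
 * space to match, and return the running total to the caller.
 */
static int hex_dump_line(char *buf, int len, const u8 *p, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++)
		ret += scnprintf(buf + ret, len - ret, "-%02x", p[i]);
	return ret;
}
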
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 394290cbbb1d..7f4c569594a5 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -207,7 +207,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
207 207
208int tipc_media_set_priority(const char *name, u32 new_value); 208int tipc_media_set_priority(const char *name, u32 new_value);
209int tipc_media_set_window(const char *name, u32 new_value); 209int tipc_media_set_window(const char *name, u32 new_value);
210void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); 210int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
211int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, 211int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
212 struct nlattr *attrs[]); 212 struct nlattr *attrs[]);
213void tipc_disable_l2_media(struct tipc_bearer *b); 213void tipc_disable_l2_media(struct tipc_bearer *b);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2830709957bd..c138d68e8a69 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
166 166
167 /* Apply trial address if we just left trial period */ 167 /* Apply trial address if we just left trial period */
168 if (!trial && !self) { 168 if (!trial && !self) {
169 tipc_net_finalize(net, tn->trial_addr); 169 tipc_sched_net_finalize(net, tn->trial_addr);
170 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
170 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); 171 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
171 } 172 }
172 173
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
300 goto exit; 301 goto exit;
301 } 302 }
302 303
303 /* Trial period over ? */ 304 /* Did we just leave trial period ? */
304 if (!time_before(jiffies, tn->addr_trial_end)) { 305 if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
305 /* Did we just leave it ? */ 306 mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
306 if (!tipc_own_addr(net)) 307 spin_unlock_bh(&d->lock);
307 tipc_net_finalize(net, tn->trial_addr); 308 tipc_sched_net_finalize(net, tn->trial_addr);
308 309 return;
309 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
310 msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
311 } 310 }
312 311
313 /* Adjust timeout interval according to discovery phase */ 312 /* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
319 d->timer_intv = TIPC_DISC_SLOW; 318 d->timer_intv = TIPC_DISC_SLOW;
320 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) 319 else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
321 d->timer_intv = TIPC_DISC_FAST; 320 d->timer_intv = TIPC_DISC_FAST;
321 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
322 msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
322 } 323 }
323 324
324 mod_timer(&d->timer, jiffies + d->timer_intv); 325 mod_timer(&d->timer, jiffies + d->timer_intv);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 201c3b5bc96b..2792a3cae682 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -43,6 +43,7 @@
43#include "discover.h" 43#include "discover.h"
44#include "netlink.h" 44#include "netlink.h"
45#include "monitor.h" 45#include "monitor.h"
46#include "trace.h"
46 47
47#include <linux/pkt_sched.h> 48#include <linux/pkt_sched.h>
48 49
@@ -105,7 +106,7 @@ struct tipc_stats {
105 * @transmitq: queue for sent, non-acked messages 106 * @transmitq: queue for sent, non-acked messages
106 * @backlogq: queue for messages waiting to be sent 107 * @backlogq: queue for messages waiting to be sent
107 * @snt_nxt: next sequence number to use for outbound messages 108 * @snt_nxt: next sequence number to use for outbound messages
108 * @last_retransmitted: sequence number of most recently retransmitted message 109 * @prev_from: sequence number of most previous retransmission request
109 * @stale_cnt: counter for number of identical retransmit attempts 110 * @stale_cnt: counter for number of identical retransmit attempts
110 * @stale_limit: time when repeated identical retransmits must force link reset 111 * @stale_limit: time when repeated identical retransmits must force link reset
111 * @ackers: # of peers that needs to ack each packet before it can be released 112 * @ackers: # of peers that needs to ack each packet before it can be released
@@ -163,7 +164,7 @@ struct tipc_link {
163 u16 limit; 164 u16 limit;
164 } backlog[5]; 165 } backlog[5];
165 u16 snd_nxt; 166 u16 snd_nxt;
166 u16 last_retransm; 167 u16 prev_from;
167 u16 window; 168 u16 window;
168 u16 stale_cnt; 169 u16 stale_cnt;
169 unsigned long stale_limit; 170 unsigned long stale_limit;
@@ -186,9 +187,6 @@ struct tipc_link {
186 u16 acked; 187 u16 acked;
187 struct tipc_link *bc_rcvlink; 188 struct tipc_link *bc_rcvlink;
188 struct tipc_link *bc_sndlink; 189 struct tipc_link *bc_sndlink;
189 unsigned long prev_retr;
190 u16 prev_from;
191 u16 prev_to;
192 u8 nack_state; 190 u8 nack_state;
193 bool bc_peer_is_up; 191 bool bc_peer_is_up;
194 192
@@ -210,7 +208,7 @@ enum {
210 BC_NACK_SND_SUPPRESS, 208 BC_NACK_SND_SUPPRESS,
211}; 209};
212 210
213#define TIPC_BC_RETR_LIMIT 10 /* [ms] */ 211#define TIPC_BC_RETR_LIM msecs_to_jiffies(10) /* [ms] */
214 212
215/* 213/*
216 * Interval between NACKs when packets arrive out of order 214 * Interval between NACKs when packets arrive out of order
@@ -359,9 +357,11 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
359 rcv_l->bc_peer_is_up = true; 357 rcv_l->bc_peer_is_up = true;
360 rcv_l->state = LINK_ESTABLISHED; 358 rcv_l->state = LINK_ESTABLISHED;
361 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); 359 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
360 trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
362 tipc_link_reset(rcv_l); 361 tipc_link_reset(rcv_l);
363 rcv_l->state = LINK_RESET; 362 rcv_l->state = LINK_RESET;
364 if (!snd_l->ackers) { 363 if (!snd_l->ackers) {
364 trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
365 tipc_link_reset(snd_l); 365 tipc_link_reset(snd_l);
366 snd_l->state = LINK_RESET; 366 snd_l->state = LINK_RESET;
367 __skb_queue_purge(xmitq); 367 __skb_queue_purge(xmitq);
@@ -525,6 +525,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
525 525
526 l = *link; 526 l = *link;
527 strcpy(l->name, tipc_bclink_name); 527 strcpy(l->name, tipc_bclink_name);
528 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
528 tipc_link_reset(l); 529 tipc_link_reset(l);
529 l->state = LINK_RESET; 530 l->state = LINK_RESET;
530 l->ackers = 0; 531 l->ackers = 0;
@@ -549,6 +550,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
549int tipc_link_fsm_evt(struct tipc_link *l, int evt) 550int tipc_link_fsm_evt(struct tipc_link *l, int evt)
550{ 551{
551 int rc = 0; 552 int rc = 0;
553 int old_state = l->state;
552 554
553 switch (l->state) { 555 switch (l->state) {
554 case LINK_RESETTING: 556 case LINK_RESETTING:
@@ -695,10 +697,12 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
695 default: 697 default:
696 pr_err("Unknown FSM state %x in %s\n", l->state, l->name); 698 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
697 } 699 }
700 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
698 return rc; 701 return rc;
699illegal_evt: 702illegal_evt:
700 pr_err("Illegal FSM event %x in state %x on link %s\n", 703 pr_err("Illegal FSM event %x in state %x on link %s\n",
701 evt, l->state, l->name); 704 evt, l->state, l->name);
705 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
702 return rc; 706 return rc;
703} 707}
704 708
@@ -743,6 +747,18 @@ static void link_profile_stats(struct tipc_link *l)
743 l->stats.msg_length_profile[6]++; 747 l->stats.msg_length_profile[6]++;
744} 748}
745 749
750/**
751 * tipc_link_too_silent - check if link is "too silent"
752 * @l: tipc link to be checked
753 *
754 * Returns true if the link 'silent_intv_cnt' is about to reach the
755 * 'abort_limit' value, otherwise false
756 */
757bool tipc_link_too_silent(struct tipc_link *l)
758{
759 return (l->silent_intv_cnt + 2 > l->abort_limit);
760}
761
746/* tipc_link_timeout - perform periodic task as instructed from node timeout 762/* tipc_link_timeout - perform periodic task as instructed from node timeout
747 */ 763 */
748int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) 764int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
@@ -756,6 +772,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
756 u16 bc_acked = l->bc_rcvlink->acked; 772 u16 bc_acked = l->bc_rcvlink->acked;
757 struct tipc_mon_state *mstate = &l->mon_state; 773 struct tipc_mon_state *mstate = &l->mon_state;
758 774
775 trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
776 trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
759 switch (l->state) { 777 switch (l->state) {
760 case LINK_ESTABLISHED: 778 case LINK_ESTABLISHED:
761 case LINK_SYNCHING: 779 case LINK_SYNCHING:
@@ -818,6 +836,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
818 TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr); 836 TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
819 skb_queue_tail(&l->wakeupq, skb); 837 skb_queue_tail(&l->wakeupq, skb);
820 l->stats.link_congs++; 838 l->stats.link_congs++;
839 trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
821 return -ELINKCONG; 840 return -ELINKCONG;
822} 841}
823 842
@@ -948,6 +967,10 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
948 } 967 }
949 __skb_dequeue(list); 968 __skb_dequeue(list);
950 __skb_queue_tail(transmq, skb); 969 __skb_queue_tail(transmq, skb);
970 /* next retransmit attempt */
971 if (link_is_bc_sndlink(l))
972 TIPC_SKB_CB(skb)->nxt_retr =
973 jiffies + TIPC_BC_RETR_LIM;
951 __skb_queue_tail(xmitq, _skb); 974 __skb_queue_tail(xmitq, _skb);
952 TIPC_SKB_CB(skb)->ackers = l->ackers; 975 TIPC_SKB_CB(skb)->ackers = l->ackers;
953 l->rcv_unacked = 0; 976 l->rcv_unacked = 0;
@@ -995,6 +1018,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
995 hdr = buf_msg(skb); 1018 hdr = buf_msg(skb);
996 l->backlog[msg_importance(hdr)].len--; 1019 l->backlog[msg_importance(hdr)].len--;
997 __skb_queue_tail(&l->transmq, skb); 1020 __skb_queue_tail(&l->transmq, skb);
1021 /* next retransmit attempt */
1022 if (link_is_bc_sndlink(l))
1023 TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
1024
998 __skb_queue_tail(xmitq, _skb); 1025 __skb_queue_tail(xmitq, _skb);
999 TIPC_SKB_CB(skb)->ackers = l->ackers; 1026 TIPC_SKB_CB(skb)->ackers = l->ackers;
1000 msg_set_seqno(hdr, seqno); 1027 msg_set_seqno(hdr, seqno);
@@ -1036,14 +1063,20 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
1036 1063
1037 if (!skb) 1064 if (!skb)
1038 return 0; 1065 return 0;
1066 if (less(to, from))
1067 return 0;
1039 1068
1069 trace_tipc_link_retrans(r, from, to, &l->transmq);
1040 /* Detect repeated retransmit failures on same packet */ 1070 /* Detect repeated retransmit failures on same packet */
1041 if (r->last_retransm != buf_seqno(skb)) { 1071 if (r->prev_from != from) {
1042 r->last_retransm = buf_seqno(skb); 1072 r->prev_from = from;
1043 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); 1073 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1044 r->stale_cnt = 0; 1074 r->stale_cnt = 0;
1045 } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { 1075 } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
1046 link_retransmit_failure(l, skb); 1076 link_retransmit_failure(l, skb);
1077 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1078 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1079 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1047 if (link_is_bc_sndlink(l)) 1080 if (link_is_bc_sndlink(l))
1048 return TIPC_LINK_DOWN_EVT; 1081 return TIPC_LINK_DOWN_EVT;
1049 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1082 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
@@ -1055,6 +1088,11 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
1055 continue; 1088 continue;
1056 if (more(msg_seqno(hdr), to)) 1089 if (more(msg_seqno(hdr), to))
1057 break; 1090 break;
1091 if (link_is_bc_sndlink(l)) {
1092 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1093 continue;
1094 TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
1095 }
1058 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 1096 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1059 if (!_skb) 1097 if (!_skb)
1060 return 0; 1098 return 0;
@@ -1398,6 +1436,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1398 l->stats.sent_nacks++; 1436 l->stats.sent_nacks++;
1399 skb->priority = TC_PRIO_CONTROL; 1437 skb->priority = TC_PRIO_CONTROL;
1400 __skb_queue_tail(xmitq, skb); 1438 __skb_queue_tail(xmitq, skb);
1439 trace_tipc_proto_build(skb, false, l->name);
1401} 1440}
1402 1441
1403void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, 1442void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
@@ -1561,6 +1600,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1561 char *if_name; 1600 char *if_name;
1562 int rc = 0; 1601 int rc = 0;
1563 1602
1603 trace_tipc_proto_rcv(skb, false, l->name);
1564 if (tipc_link_is_blocked(l) || !xmitq) 1604 if (tipc_link_is_blocked(l) || !xmitq)
1565 goto exit; 1605 goto exit;
1566 1606
@@ -1571,8 +1611,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1571 hdr = buf_msg(skb); 1611 hdr = buf_msg(skb);
1572 data = msg_data(hdr); 1612 data = msg_data(hdr);
1573 1613
1574 if (!tipc_link_validate_msg(l, hdr)) 1614 if (!tipc_link_validate_msg(l, hdr)) {
1615 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1616 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1575 goto exit; 1617 goto exit;
1618 }
1576 1619
1577 switch (mtyp) { 1620 switch (mtyp) {
1578 case RESET_MSG: 1621 case RESET_MSG:
@@ -1594,14 +1637,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1594 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) 1637 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1595 l->priority = peers_prio; 1638 l->priority = peers_prio;
1596 1639
1597 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ 1640 /* If peer is going down we want full re-establish cycle */
1598 if (msg_peer_stopping(hdr)) 1641 if (msg_peer_stopping(hdr)) {
1599 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1642 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1600 else if ((mtyp == RESET_MSG) || !link_is_up(l)) 1643 break;
1644 }
1645 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1646 if (mtyp == RESET_MSG || !link_is_up(l))
1601 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); 1647 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1602 1648
1603 /* ACTIVATE_MSG takes up link if it was already locally reset */ 1649 /* ACTIVATE_MSG takes up link if it was already locally reset */
1604 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING)) 1650 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1605 rc = TIPC_LINK_UP_EVT; 1651 rc = TIPC_LINK_UP_EVT;
1606 1652
1607 l->peer_session = msg_session(hdr); 1653 l->peer_session = msg_session(hdr);
@@ -1734,42 +1780,6 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1734 l->rcv_nxt = peers_snd_nxt; 1780 l->rcv_nxt = peers_snd_nxt;
1735} 1781}
1736 1782
1737/* link_bc_retr eval()- check if the indicated range can be retransmitted now
1738 * - Adjust permitted range if there is overlap with previous retransmission
1739 */
1740static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1741{
1742 unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1743
1744 if (less(*to, *from))
1745 return false;
1746
1747 /* New retransmission request */
1748 if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1749 less(*to, l->prev_from) || more(*from, l->prev_to)) {
1750 l->prev_from = *from;
1751 l->prev_to = *to;
1752 l->prev_retr = jiffies;
1753 return true;
1754 }
1755
1756 /* Inside range of previous retransmit */
1757 if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1758 return false;
1759
1760 /* Fully or partially outside previous range => exclude overlap */
1761 if (less(*from, l->prev_from)) {
1762 *to = l->prev_from - 1;
1763 l->prev_from = *from;
1764 }
1765 if (more(*to, l->prev_to)) {
1766 *from = l->prev_to + 1;
1767 l->prev_to = *to;
1768 }
1769 l->prev_retr = jiffies;
1770 return true;
1771}
1772
1773/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state 1783/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1774 */ 1784 */
1775int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, 1785int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
@@ -1800,8 +1810,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1800 if (more(peers_snd_nxt, l->rcv_nxt + l->window)) 1810 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1801 return rc; 1811 return rc;
1802 1812
1803 if (link_bc_retr_eval(snd_l, &from, &to)) 1813 rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
1804 rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
1805 1814
1806 l->snd_nxt = peers_snd_nxt; 1815 l->snd_nxt = peers_snd_nxt;
1807 if (link_bc_rcv_gap(l)) 1816 if (link_bc_rcv_gap(l))
@@ -1849,6 +1858,7 @@ void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1849 if (!more(acked, l->acked)) 1858 if (!more(acked, l->acked))
1850 return; 1859 return;
1851 1860
1861 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
1852 /* Skip over packets peer has already acked */ 1862 /* Skip over packets peer has already acked */
1853 skb_queue_walk(&snd_l->transmq, skb) { 1863 skb_queue_walk(&snd_l->transmq, skb) {
1854 if (more(buf_seqno(skb), l->acked)) 1864 if (more(buf_seqno(skb), l->acked))
@@ -2252,3 +2262,122 @@ void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2252{ 2262{
2253 l->abort_limit = limit; 2263 l->abort_limit = limit;
2254} 2264}
2265
2266char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2267{
2268 if (!l)
2269 scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2270 else if (link_is_bc_sndlink(l))
2271 scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2272 else if (link_is_bc_rcvlink(l))
2273 scnprintf(buf, TIPC_MAX_LINK_NAME,
2274 "broadcast-receiver, peer %x", l->addr);
2275 else
2276 memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2277
2278 return buf;
2279}
2280
2281/**
2282 * tipc_link_dump - dump TIPC link data
2283 * @l: tipc link to be dumped
2284 * @dqueues: bitmask to decide if any link queue to be dumped?
2285 * - TIPC_DUMP_NONE: don't dump link queues
2286 * - TIPC_DUMP_TRANSMQ: dump link transmq queue
2287 * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2288 * - TIPC_DUMP_DEFERDQ: dump link deferd queue
2289 * - TIPC_DUMP_INPUTQ: dump link input queue
2290 * - TIPC_DUMP_WAKEUP: dump link wakeup queue
2291 * - TIPC_DUMP_ALL: dump all the link queues above
2292 * @buf: returned buffer of dump data in format
2293 */
2294int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2295{
2296 int i = 0;
2297 size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2298 struct sk_buff_head *list;
2299 struct sk_buff *hskb, *tskb;
2300 u32 len;
2301
2302 if (!l) {
2303 i += scnprintf(buf, sz, "link data: (null)\n");
2304 return i;
2305 }
2306
2307 i += scnprintf(buf, sz, "link data: %x", l->addr);
2308 i += scnprintf(buf + i, sz - i, " %x", l->state);
2309 i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2310 i += scnprintf(buf + i, sz - i, " %u", l->session);
2311 i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2312 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2313 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2314 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2315 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2316 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2317 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2318 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2319 i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
2320 i += scnprintf(buf + i, sz - i, " %u", l->stale_cnt);
2321 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2322
2323 list = &l->transmq;
2324 len = skb_queue_len(list);
2325 hskb = skb_peek(list);
2326 tskb = skb_peek_tail(list);
2327 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2328 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2329 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2330
2331 list = &l->deferdq;
2332 len = skb_queue_len(list);
2333 hskb = skb_peek(list);
2334 tskb = skb_peek_tail(list);
2335 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2336 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2337 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2338
2339 list = &l->backlogq;
2340 len = skb_queue_len(list);
2341 hskb = skb_peek(list);
2342 tskb = skb_peek_tail(list);
2343 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2344 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2345 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2346
2347 list = l->inputq;
2348 len = skb_queue_len(list);
2349 hskb = skb_peek(list);
2350 tskb = skb_peek_tail(list);
2351 i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2352 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2353 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2354
2355 if (dqueues & TIPC_DUMP_TRANSMQ) {
2356 i += scnprintf(buf + i, sz - i, "transmq: ");
2357 i += tipc_list_dump(&l->transmq, false, buf + i);
2358 }
2359 if (dqueues & TIPC_DUMP_BACKLOGQ) {
2360 i += scnprintf(buf + i, sz - i,
2361 "backlogq: <%u %u %u %u %u>, ",
2362 l->backlog[TIPC_LOW_IMPORTANCE].len,
2363 l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2364 l->backlog[TIPC_HIGH_IMPORTANCE].len,
2365 l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2366 l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2367 i += tipc_list_dump(&l->backlogq, false, buf + i);
2368 }
2369 if (dqueues & TIPC_DUMP_DEFERDQ) {
2370 i += scnprintf(buf + i, sz - i, "deferdq: ");
2371 i += tipc_list_dump(&l->deferdq, false, buf + i);
2372 }
2373 if (dqueues & TIPC_DUMP_INPUTQ) {
2374 i += scnprintf(buf + i, sz - i, "inputq: ");
2375 i += tipc_list_dump(l->inputq, false, buf + i);
2376 }
2377 if (dqueues & TIPC_DUMP_WAKEUP) {
2378 i += scnprintf(buf + i, sz - i, "wakeup: ");
2379 i += tipc_list_dump(&l->wakeupq, false, buf + i);
2380 }
2381
2382 return i;
2383}
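
tipc_link_name_ext() and tipc_link_dump() exist for the new trace/dump machinery; the LINK_LMIN/LINK_LMAX buffer sizes and the TIPC_DUMP_* flags used above come from the trace.h introduced by this series, which is not shown in this section. A hedged usage sketch under that assumption, dumping only the link fields with no queues:

	char buf[LINK_LMIN];	/* assumed: LINK_LMIN from the new trace.h,
				 * small enough for an on-stack buffer
				 */

	tipc_link_dump(l, TIPC_DUMP_NONE, buf);
	pr_info("%s", buf);
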
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 90488c538a4e..8439e0ee53a8 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -109,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
109u16 tipc_link_acked(struct tipc_link *l); 109u16 tipc_link_acked(struct tipc_link *l);
110u32 tipc_link_id(struct tipc_link *l); 110u32 tipc_link_id(struct tipc_link *l);
111char *tipc_link_name(struct tipc_link *l); 111char *tipc_link_name(struct tipc_link *l);
112char *tipc_link_name_ext(struct tipc_link *l, char *buf);
112u32 tipc_link_state(struct tipc_link *l); 113u32 tipc_link_state(struct tipc_link *l);
113char tipc_link_plane(struct tipc_link *l); 114char tipc_link_plane(struct tipc_link *l);
114int tipc_link_prio(struct tipc_link *l); 115int tipc_link_prio(struct tipc_link *l);
@@ -147,4 +148,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
147 struct sk_buff_head *xmitq); 148 struct sk_buff_head *xmitq);
148int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, 149int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
149 struct sk_buff_head *xmitq); 150 struct sk_buff_head *xmitq);
151bool tipc_link_too_silent(struct tipc_link *l);
150#endif 152#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b61891054709..f48e5857210f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -499,54 +499,56 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
499/** 499/**
500 * tipc_msg_reverse(): swap source and destination addresses and add error code 500 * tipc_msg_reverse(): swap source and destination addresses and add error code
501 * @own_node: originating node id for reversed message 501 * @own_node: originating node id for reversed message
502 * @skb: buffer containing message to be reversed; may be replaced. 502 * @skb: buffer containing message to be reversed; will be consumed
503 * @err: error code to be set in message, if any 503 * @err: error code to be set in message, if any
504 * Consumes buffer at failure 504 * Replaces consumed buffer with new one when successful
505 * Returns true if success, otherwise false 505 * Returns true if success, otherwise false
506 */ 506 */
507bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) 507bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
508{ 508{
509 struct sk_buff *_skb = *skb; 509 struct sk_buff *_skb = *skb;
510 struct tipc_msg *hdr; 510 struct tipc_msg *_hdr, *hdr;
511 struct tipc_msg ohdr; 511 int hlen, dlen;
512 int dlen;
513 512
514 if (skb_linearize(_skb)) 513 if (skb_linearize(_skb))
515 goto exit; 514 goto exit;
516 hdr = buf_msg(_skb); 515 _hdr = buf_msg(_skb);
517 dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); 516 dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
518 if (msg_dest_droppable(hdr)) 517 hlen = msg_hdr_sz(_hdr);
518
519 if (msg_dest_droppable(_hdr))
519 goto exit; 520 goto exit;
520 if (msg_errcode(hdr)) 521 if (msg_errcode(_hdr))
521 goto exit; 522 goto exit;
522 523
523 /* Take a copy of original header before altering message */ 524 /* Never return SHORT header */
524 memcpy(&ohdr, hdr, msg_hdr_sz(hdr)); 525 if (hlen == SHORT_H_SIZE)
525 526 hlen = BASIC_H_SIZE;
526 /* Never return SHORT header; expand by replacing buffer if necessary */ 527
527 if (msg_short(hdr)) { 528 /* Don't return data along with SYN+, - sender has a clone */
528 *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); 529 if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
529 if (!*skb) 530 dlen = 0;
530 goto exit; 531
531 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); 532 /* Allocate new buffer to return */
532 kfree_skb(_skb); 533 *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
533 _skb = *skb; 534 if (!*skb)
534 hdr = buf_msg(_skb); 535 goto exit;
535 memcpy(hdr, &ohdr, BASIC_H_SIZE); 536 memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
536 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 537 memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
537 }
538 538
539 /* Now reverse the concerned fields */ 539 /* Build reverse header in new buffer */
540 hdr = buf_msg(*skb);
541 msg_set_hdr_sz(hdr, hlen);
540 msg_set_errcode(hdr, err); 542 msg_set_errcode(hdr, err);
541 msg_set_non_seq(hdr, 0); 543 msg_set_non_seq(hdr, 0);
542 msg_set_origport(hdr, msg_destport(&ohdr)); 544 msg_set_origport(hdr, msg_destport(_hdr));
543 msg_set_destport(hdr, msg_origport(&ohdr)); 545 msg_set_destport(hdr, msg_origport(_hdr));
544 msg_set_destnode(hdr, msg_prevnode(&ohdr)); 546 msg_set_destnode(hdr, msg_prevnode(_hdr));
545 msg_set_prevnode(hdr, own_node); 547 msg_set_prevnode(hdr, own_node);
546 msg_set_orignode(hdr, own_node); 548 msg_set_orignode(hdr, own_node);
547 msg_set_size(hdr, msg_hdr_sz(hdr) + dlen); 549 msg_set_size(hdr, hlen + dlen);
548 skb_trim(_skb, msg_size(hdr));
549 skb_orphan(_skb); 550 skb_orphan(_skb);
551 kfree_skb(_skb);
550 return true; 552 return true;
551exit: 553exit:
552 kfree_skb(_skb); 554 kfree_skb(_skb);
@@ -554,6 +556,22 @@ exit:
554 return false; 556 return false;
555} 557}
556 558
559bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
560{
561 struct sk_buff *skb, *_skb;
562
563 skb_queue_walk(msg, skb) {
564 _skb = skb_clone(skb, GFP_ATOMIC);
565 if (!_skb) {
566 __skb_queue_purge(cpy);
567 pr_err_ratelimited("Failed to clone buffer chain\n");
568 return false;
569 }
570 __skb_queue_tail(cpy, _skb);
571 }
572 return true;
573}
574
557/** 575/**
558 * tipc_msg_lookup_dest(): try to find new destination for named message 576 * tipc_msg_lookup_dest(): try to find new destination for named message
559 * @skb: the buffer containing the message. 577 * @skb: the buffer containing the message.
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a4e944d59394..a0924956bb61 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -105,6 +105,7 @@ struct tipc_skb_cb {
105 u32 bytes_read; 105 u32 bytes_read;
106 u32 orig_member; 106 u32 orig_member;
107 struct sk_buff *tail; 107 struct sk_buff *tail;
108 unsigned long nxt_retr;
108 bool validated; 109 bool validated;
109 u16 chain_imp; 110 u16 chain_imp;
110 u16 ackers; 111 u16 ackers;
@@ -216,6 +217,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
216 msg_set_bits(m, 0, 20, 1, n); 217 msg_set_bits(m, 0, 20, 1, n);
217} 218}
218 219
220static inline int msg_is_syn(struct tipc_msg *m)
221{
222 return msg_bits(m, 0, 17, 1);
223}
224
225static inline void msg_set_syn(struct tipc_msg *m, u32 d)
226{
227 msg_set_bits(m, 0, 17, 1, d);
228}
229
219static inline int msg_dest_droppable(struct tipc_msg *m) 230static inline int msg_dest_droppable(struct tipc_msg *m)
220{ 231{
221 return msg_bits(m, 0, 19, 1); 232 return msg_bits(m, 0, 19, 1);
@@ -970,6 +981,7 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
970 struct sk_buff_head *cpy); 981 struct sk_buff_head *cpy);
971void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, 982void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
972 struct sk_buff *skb); 983 struct sk_buff *skb);
984bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
973 985
974static inline u16 buf_seqno(struct sk_buff *skb) 986static inline u16 buf_seqno(struct sk_buff *skb)
975{ 987{
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 3cfeb9df64b0..61219f0b9677 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -94,8 +94,9 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
94 list_add_tail_rcu(&publ->binding_node, &nt->node_scope); 94 list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
95 return NULL; 95 return NULL;
96 } 96 }
97 list_add_tail_rcu(&publ->binding_node, &nt->cluster_scope); 97 write_lock_bh(&nt->cluster_scope_lock);
98 98 list_add_tail(&publ->binding_node, &nt->cluster_scope);
99 write_unlock_bh(&nt->cluster_scope_lock);
99 skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0); 100 skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
100 if (!skb) { 101 if (!skb) {
101 pr_warn("Publication distribution failure\n"); 102 pr_warn("Publication distribution failure\n");
@@ -112,11 +113,13 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
112 */ 113 */
113struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) 114struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
114{ 115{
116 struct name_table *nt = tipc_name_table(net);
115 struct sk_buff *buf; 117 struct sk_buff *buf;
116 struct distr_item *item; 118 struct distr_item *item;
117 119
118 list_del_rcu(&publ->binding_node); 120 write_lock_bh(&nt->cluster_scope_lock);
119 121 list_del(&publ->binding_node);
122 write_unlock_bh(&nt->cluster_scope_lock);
120 if (publ->scope == TIPC_NODE_SCOPE) 123 if (publ->scope == TIPC_NODE_SCOPE)
121 return NULL; 124 return NULL;
122 125
@@ -147,7 +150,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
147 ITEM_SIZE) * ITEM_SIZE; 150 ITEM_SIZE) * ITEM_SIZE;
148 u32 msg_rem = msg_dsz; 151 u32 msg_rem = msg_dsz;
149 152
150 list_for_each_entry_rcu(publ, pls, binding_node) { 153 list_for_each_entry(publ, pls, binding_node) {
151 /* Prepare next buffer: */ 154 /* Prepare next buffer: */
152 if (!skb) { 155 if (!skb) {
153 skb = named_prepare_buf(net, PUBLICATION, msg_rem, 156 skb = named_prepare_buf(net, PUBLICATION, msg_rem,
@@ -189,11 +192,10 @@ void tipc_named_node_up(struct net *net, u32 dnode)
189 192
190 __skb_queue_head_init(&head); 193 __skb_queue_head_init(&head);
191 194
192 rcu_read_lock(); 195 read_lock_bh(&nt->cluster_scope_lock);
193 named_distribute(net, &head, dnode, &nt->cluster_scope); 196 named_distribute(net, &head, dnode, &nt->cluster_scope);
194 rcu_read_unlock();
195
196 tipc_node_xmit(net, &head, dnode, 0); 197 tipc_node_xmit(net, &head, dnode, 0);
198 read_unlock_bh(&nt->cluster_scope_lock);
197} 199}
198 200
199/** 201/**
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 66d5b2c5987a..bff241f03525 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -744,6 +744,7 @@ int tipc_nametbl_init(struct net *net)
744 744
745 INIT_LIST_HEAD(&nt->node_scope); 745 INIT_LIST_HEAD(&nt->node_scope);
746 INIT_LIST_HEAD(&nt->cluster_scope); 746 INIT_LIST_HEAD(&nt->cluster_scope);
747 rwlock_init(&nt->cluster_scope_lock);
747 tn->nametbl = nt; 748 tn->nametbl = nt;
748 spin_lock_init(&tn->nametbl_lock); 749 spin_lock_init(&tn->nametbl_lock);
749 return 0; 750 return 0;
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 892bd750b85f..f79066334cc8 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,6 +100,7 @@ struct name_table {
100 struct hlist_head services[TIPC_NAMETBL_SIZE]; 100 struct hlist_head services[TIPC_NAMETBL_SIZE];
101 struct list_head node_scope; 101 struct list_head node_scope;
102 struct list_head cluster_scope; 102 struct list_head cluster_scope;
103 rwlock_t cluster_scope_lock;
103 u32 local_publ_count; 104 u32 local_publ_count;
104}; 105};
105 106
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 62199cf5a56c..f076edb74338 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -104,6 +104,14 @@
104 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
105*/ 105*/
106 106
107struct tipc_net_work {
108 struct work_struct work;
109 struct net *net;
110 u32 addr;
111};
112
113static void tipc_net_finalize(struct net *net, u32 addr);
114
107int tipc_net_init(struct net *net, u8 *node_id, u32 addr) 115int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
108{ 116{
109 if (tipc_own_id(net)) { 117 if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
119 return 0; 127 return 0;
120} 128}
121 129
122void tipc_net_finalize(struct net *net, u32 addr) 130static void tipc_net_finalize(struct net *net, u32 addr)
123{ 131{
124 struct tipc_net *tn = tipc_net(net); 132 struct tipc_net *tn = tipc_net(net);
125 133
126 if (!cmpxchg(&tn->node_addr, 0, addr)) { 134 if (cmpxchg(&tn->node_addr, 0, addr))
127 tipc_set_node_addr(net, addr); 135 return;
128 tipc_named_reinit(net); 136 tipc_set_node_addr(net, addr);
129 tipc_sk_reinit(net); 137 tipc_named_reinit(net);
130 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, 138 tipc_sk_reinit(net);
131 TIPC_CLUSTER_SCOPE, 0, addr); 139 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
132 } 140 TIPC_CLUSTER_SCOPE, 0, addr);
141}
142
143static void tipc_net_finalize_work(struct work_struct *work)
144{
145 struct tipc_net_work *fwork;
146
147 fwork = container_of(work, struct tipc_net_work, work);
148 tipc_net_finalize(fwork->net, fwork->addr);
149 kfree(fwork);
150}
151
152void tipc_sched_net_finalize(struct net *net, u32 addr)
153{
154 struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
155
156 if (!fwork)
157 return;
158 INIT_WORK(&fwork->work, tipc_net_finalize_work);
159 fwork->net = net;
160 fwork->addr = addr;
161 schedule_work(&fwork->work);
133} 162}
134 163
135void tipc_net_stop(struct net *net) 164void tipc_net_stop(struct net *net)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 09ad02b50bb1..b7f2e364eb99 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -42,7 +42,7 @@
42extern const struct nla_policy tipc_nl_net_policy[]; 42extern const struct nla_policy tipc_nl_net_policy[];
43 43
44int tipc_net_init(struct net *net, u8 *node_id, u32 addr); 44int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
45void tipc_net_finalize(struct net *net, u32 addr); 45void tipc_sched_net_finalize(struct net *net, u32 addr);
46void tipc_net_stop(struct net *net); 46void tipc_net_stop(struct net *net);
47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 47int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 48int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 6376467e78f8..40f5cae623a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -904,6 +904,8 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
904 904
905 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, 905 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
906 TIPC_NL_PUBL_GET); 906 TIPC_NL_PUBL_GET);
907 if (!hdr)
908 return -EMSGSIZE;
907 909
908 nest = nla_nest_start(args, TIPC_NLA_SOCK); 910 nest = nla_nest_start(args, TIPC_NLA_SOCK);
909 if (!nest) { 911 if (!nest) {
@@ -951,8 +953,11 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
951 u32 node; 953 u32 node;
952 struct nlattr *con[TIPC_NLA_CON_MAX + 1]; 954 struct nlattr *con[TIPC_NLA_CON_MAX + 1];
953 955
954 nla_parse_nested(con, TIPC_NLA_CON_MAX, 956 err = nla_parse_nested(con, TIPC_NLA_CON_MAX,
955 sock[TIPC_NLA_SOCK_CON], NULL, NULL); 957 sock[TIPC_NLA_SOCK_CON], NULL, NULL);
958
959 if (err)
960 return err;
956 961
957 node = nla_get_u32(con[TIPC_NLA_CON_NODE]); 962 node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
958 tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>", 963 tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2afc4f8c37a7..db2a6c3e0be9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -43,6 +43,7 @@
43#include "monitor.h" 43#include "monitor.h"
44#include "discover.h" 44#include "discover.h"
45#include "netlink.h" 45#include "netlink.h"
46#include "trace.h"
46 47
47#define INVALID_NODE_SIG 0x10000 48#define INVALID_NODE_SIG 0x10000
48#define NODE_CLEANUP_AFTER 300000 49#define NODE_CLEANUP_AFTER 300000
@@ -432,6 +433,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
432 break; 433 break;
433 } 434 }
434 list_add_tail_rcu(&n->list, &temp_node->list); 435 list_add_tail_rcu(&n->list, &temp_node->list);
436 trace_tipc_node_create(n, true, " ");
435exit: 437exit:
436 spin_unlock_bh(&tn->node_list_lock); 438 spin_unlock_bh(&tn->node_list_lock);
437 return n; 439 return n;
@@ -459,6 +461,7 @@ static void tipc_node_delete_from_list(struct tipc_node *node)
459 461
460static void tipc_node_delete(struct tipc_node *node) 462static void tipc_node_delete(struct tipc_node *node)
461{ 463{
464 trace_tipc_node_delete(node, true, " ");
462 tipc_node_delete_from_list(node); 465 tipc_node_delete_from_list(node);
463 466
464 del_timer_sync(&node->timer); 467 del_timer_sync(&node->timer);
@@ -584,12 +587,15 @@ static void tipc_node_clear_links(struct tipc_node *node)
584/* tipc_node_cleanup - delete nodes that does not 587/* tipc_node_cleanup - delete nodes that does not
585 * have active links for NODE_CLEANUP_AFTER time 588 * have active links for NODE_CLEANUP_AFTER time
586 */ 589 */
587static int tipc_node_cleanup(struct tipc_node *peer) 590static bool tipc_node_cleanup(struct tipc_node *peer)
588{ 591{
589 struct tipc_net *tn = tipc_net(peer->net); 592 struct tipc_net *tn = tipc_net(peer->net);
590 bool deleted = false; 593 bool deleted = false;
591 594
592 spin_lock_bh(&tn->node_list_lock); 595 /* If lock held by tipc_node_stop() the node will be deleted anyway */
596 if (!spin_trylock_bh(&tn->node_list_lock))
597 return false;
598
593 tipc_node_write_lock(peer); 599 tipc_node_write_lock(peer);
594 600
595 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { 601 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
@@ -613,6 +619,7 @@ static void tipc_node_timeout(struct timer_list *t)
613 int bearer_id; 619 int bearer_id;
614 int rc = 0; 620 int rc = 0;
615 621
622 trace_tipc_node_timeout(n, false, " ");
616 if (!node_is_up(n) && tipc_node_cleanup(n)) { 623 if (!node_is_up(n) && tipc_node_cleanup(n)) {
617 /*Removing the reference of Timer*/ 624 /*Removing the reference of Timer*/
618 tipc_node_put(n); 625 tipc_node_put(n);
@@ -621,6 +628,12 @@ static void tipc_node_timeout(struct timer_list *t)
621 628
622 __skb_queue_head_init(&xmitq); 629 __skb_queue_head_init(&xmitq);
623 630
631 /* Initial node interval to value larger (10 seconds), then it will be
632 * recalculated with link lowest tolerance
633 */
634 tipc_node_read_lock(n);
635 n->keepalive_intv = 10000;
636 tipc_node_read_unlock(n);
624 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) { 637 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
625 tipc_node_read_lock(n); 638 tipc_node_read_lock(n);
626 le = &n->links[bearer_id]; 639 le = &n->links[bearer_id];
@@ -672,6 +685,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
672 685
673 pr_debug("Established link <%s> on network plane %c\n", 686 pr_debug("Established link <%s> on network plane %c\n",
674 tipc_link_name(nl), tipc_link_plane(nl)); 687 tipc_link_name(nl), tipc_link_plane(nl));
688 trace_tipc_node_link_up(n, true, " ");
675 689
676 /* Ensure that a STATE message goes first */ 690 /* Ensure that a STATE message goes first */
677 tipc_link_build_state_msg(nl, xmitq); 691 tipc_link_build_state_msg(nl, xmitq);
@@ -774,6 +788,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
774 if (tipc_link_peer_is_down(l)) 788 if (tipc_link_peer_is_down(l))
775 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 789 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
776 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); 790 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
791 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
777 tipc_link_fsm_evt(l, LINK_RESET_EVT); 792 tipc_link_fsm_evt(l, LINK_RESET_EVT);
778 tipc_link_reset(l); 793 tipc_link_reset(l);
779 tipc_link_build_reset_msg(l, xmitq); 794 tipc_link_build_reset_msg(l, xmitq);
@@ -791,6 +806,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
791 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 806 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
792 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 807 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
793 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 808 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
809 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
794 tipc_link_reset(l); 810 tipc_link_reset(l);
795 tipc_link_fsm_evt(l, LINK_RESET_EVT); 811 tipc_link_fsm_evt(l, LINK_RESET_EVT);
796 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 812 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
@@ -823,6 +839,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
823 /* Defuse pending tipc_node_link_up() */ 839 /* Defuse pending tipc_node_link_up() */
824 tipc_link_fsm_evt(l, LINK_RESET_EVT); 840 tipc_link_fsm_evt(l, LINK_RESET_EVT);
825 } 841 }
842 trace_tipc_node_link_down(n, true, "node link down or deleted!");
826 tipc_node_write_unlock(n); 843 tipc_node_write_unlock(n);
827 if (delete) 844 if (delete)
828 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); 845 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
@@ -1012,6 +1029,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
1012 *respond = false; 1029 *respond = false;
1013 goto exit; 1030 goto exit;
1014 } 1031 }
1032 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1015 tipc_link_reset(l); 1033 tipc_link_reset(l);
1016 tipc_link_fsm_evt(l, LINK_RESET_EVT); 1034 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1017 if (n->state == NODE_FAILINGOVER) 1035 if (n->state == NODE_FAILINGOVER)
@@ -1051,6 +1069,7 @@ static void tipc_node_reset_links(struct tipc_node *n)
1051 1069
1052 pr_warn("Resetting all links to %x\n", n->addr); 1070 pr_warn("Resetting all links to %x\n", n->addr);
1053 1071
1072 trace_tipc_node_reset_links(n, true, " ");
1054 for (i = 0; i < MAX_BEARERS; i++) { 1073 for (i = 0; i < MAX_BEARERS; i++) {
1055 tipc_node_link_down(n, i, false); 1074 tipc_node_link_down(n, i, false);
1056 } 1075 }
@@ -1226,11 +1245,13 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1226 pr_err("Unknown node fsm state %x\n", state); 1245 pr_err("Unknown node fsm state %x\n", state);
1227 break; 1246 break;
1228 } 1247 }
1248 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1229 n->state = state; 1249 n->state = state;
1230 return; 1250 return;
1231 1251
1232illegal_evt: 1252illegal_evt:
1233 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); 1253 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1254 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1234} 1255}
1235 1256
1236static void node_lost_contact(struct tipc_node *n, 1257static void node_lost_contact(struct tipc_node *n,
@@ -1244,6 +1265,7 @@ static void node_lost_contact(struct tipc_node *n,
1244 1265
1245 pr_debug("Lost contact with %x\n", n->addr); 1266 pr_debug("Lost contact with %x\n", n->addr);
1246 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); 1267 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1268 trace_tipc_node_lost_contact(n, true, " ");
1247 1269
1248 /* Clean up broadcast state */ 1270 /* Clean up broadcast state */
1249 tipc_bcast_remove_peer(n->net, n->bc_entry.link); 1271 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1540,6 +1562,10 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
1540 if (!skb_queue_empty(&be->inputq1)) 1562 if (!skb_queue_empty(&be->inputq1))
1541 tipc_node_mcast_rcv(n); 1563 tipc_node_mcast_rcv(n);
1542 1564
1565 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1566 if (!skb_queue_empty(&n->bc_entry.namedq))
1567 tipc_named_rcv(net, &n->bc_entry.namedq);
1568
1543 /* If reassembly or retransmission failure => reset all links to peer */ 1569 /* If reassembly or retransmission failure => reset all links to peer */
1544 if (rc & TIPC_LINK_DOWN_EVT) 1570 if (rc & TIPC_LINK_DOWN_EVT)
1545 tipc_node_reset_links(n); 1571 tipc_node_reset_links(n);
@@ -1568,6 +1594,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1568 struct tipc_media_addr *maddr; 1594 struct tipc_media_addr *maddr;
1569 int pb_id; 1595 int pb_id;
1570 1596
1597 if (trace_tipc_node_check_state_enabled()) {
1598 trace_tipc_skb_dump(skb, false, "skb for node state check");
1599 trace_tipc_node_check_state(n, true, " ");
1600 }
1571 l = n->links[bearer_id].link; 1601 l = n->links[bearer_id].link;
1572 if (!l) 1602 if (!l)
1573 return false; 1603 return false;
@@ -1585,8 +1615,11 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1585 } 1615 }
1586 } 1616 }
1587 1617
1588 if (!tipc_link_validate_msg(l, hdr)) 1618 if (!tipc_link_validate_msg(l, hdr)) {
1619 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1620 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1589 return false; 1621 return false;
1622 }
1590 1623
1591 /* Check and update node accesibility if applicable */ 1624 /* Check and update node accesibility if applicable */
1592 if (state == SELF_UP_PEER_COMING) { 1625 if (state == SELF_UP_PEER_COMING) {
@@ -1616,6 +1649,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1616 syncpt = oseqno + exp_pkts - 1; 1649 syncpt = oseqno + exp_pkts - 1;
1617 if (pl && tipc_link_is_up(pl)) { 1650 if (pl && tipc_link_is_up(pl)) {
1618 __tipc_node_link_down(n, &pb_id, xmitq, &maddr); 1651 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1652 trace_tipc_node_link_down(n, true,
1653 "node link down <- failover!");
1619 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl), 1654 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1620 tipc_link_inputq(l)); 1655 tipc_link_inputq(l));
1621 } 1656 }
@@ -2422,3 +2457,65 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2422 2457
2423 return skb->len; 2458 return skb->len;
2424} 2459}
2460
2461u32 tipc_node_get_addr(struct tipc_node *node)
2462{
2463 return (node) ? node->addr : 0;
2464}
2465
2466/**
2467 * tipc_node_dump - dump TIPC node data
2468 * @n: tipc node to be dumped
2469 * @more: dump more?
2470 * - false: dump only tipc node data
2471 * - true: dump node link data as well
2472 * @buf: returned buffer of dump data in format
2473 */
2474int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
2475{
2476 int i = 0;
2477 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
2478
2479 if (!n) {
2480 i += scnprintf(buf, sz, "node data: (null)\n");
2481 return i;
2482 }
2483
2484 i += scnprintf(buf, sz, "node data: %x", n->addr);
2485 i += scnprintf(buf + i, sz - i, " %x", n->state);
2486 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
2487 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
2488 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
2489 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
2490 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
2491 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
2492 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
2493 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
2494 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
2495
2496 if (!more)
2497 return i;
2498
2499 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
2500 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
2501 i += scnprintf(buf + i, sz - i, " media: ");
2502 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
2503 i += scnprintf(buf + i, sz - i, "\n");
2504 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
2505 i += scnprintf(buf + i, sz - i, " inputq: ");
2506 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
2507
2508 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
2509 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
2510 i += scnprintf(buf + i, sz - i, " media: ");
2511 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
2512 i += scnprintf(buf + i, sz - i, "\n");
2513 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
2514 i += scnprintf(buf + i, sz - i, " inputq: ");
2515 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
2516
2517 i += scnprintf(buf + i, sz - i, "bclink:\n ");
2518 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
2519
2520 return i;
2521}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 48b3298a248d..4f59a30e989a 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,6 +45,7 @@
45/* Optional capabilities supported by this code version 45/* Optional capabilities supported by this code version
46 */ 46 */
47enum { 47enum {
48 TIPC_SYN_BIT = (1),
48 TIPC_BCAST_SYNCH = (1 << 1), 49 TIPC_BCAST_SYNCH = (1 << 1),
49 TIPC_BCAST_STATE_NACK = (1 << 2), 50 TIPC_BCAST_STATE_NACK = (1 << 2),
50 TIPC_BLOCK_FLOWCTL = (1 << 3), 51 TIPC_BLOCK_FLOWCTL = (1 << 3),
@@ -53,16 +54,18 @@ enum {
53 TIPC_LINK_PROTO_SEQNO = (1 << 6) 54 TIPC_LINK_PROTO_SEQNO = (1 << 6)
54}; 55};
55 56
56#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ 57#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
57 TIPC_BCAST_STATE_NACK | \ 58 TIPC_BCAST_SYNCH | \
58 TIPC_BCAST_RCAST | \ 59 TIPC_BCAST_STATE_NACK | \
59 TIPC_BLOCK_FLOWCTL | \ 60 TIPC_BCAST_RCAST | \
60 TIPC_NODE_ID128 | \ 61 TIPC_BLOCK_FLOWCTL | \
62 TIPC_NODE_ID128 | \
61 TIPC_LINK_PROTO_SEQNO) 63 TIPC_LINK_PROTO_SEQNO)
62#define INVALID_BEARER_ID -1 64#define INVALID_BEARER_ID -1
63 65
64void tipc_node_stop(struct net *net); 66void tipc_node_stop(struct net *net);
65bool tipc_node_get_id(struct net *net, u32 addr, u8 *id); 67bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
68u32 tipc_node_get_addr(struct tipc_node *node);
66u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr); 69u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
67void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128, 70void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
68 struct tipc_bearer *bearer, 71 struct tipc_bearer *bearer,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 49810fdff4c5..1217c90a363b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -46,8 +46,9 @@
46#include "bcast.h" 46#include "bcast.h"
47#include "netlink.h" 47#include "netlink.h"
48#include "group.h" 48#include "group.h"
49#include "trace.h"
49 50
50#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 51#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */ 52#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
52#define TIPC_FWD_MSG 1 53#define TIPC_FWD_MSG 1
53#define TIPC_MAX_PORT 0xffffffff 54#define TIPC_MAX_PORT 0xffffffff
@@ -80,7 +81,6 @@ struct sockaddr_pair {
80 * @publications: list of publications for port 81 * @publications: list of publications for port
81 * @blocking_link: address of the congested link we are currently sleeping on 82 * @blocking_link: address of the congested link we are currently sleeping on
82 * @pub_count: total # of publications port has made during its lifetime 83 * @pub_count: total # of publications port has made during its lifetime
83 * @probing_state:
84 * @conn_timeout: the time we can wait for an unresponded setup request 84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue 85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links 86 * @cong_link_cnt: number of congested links
@@ -102,8 +102,8 @@ struct tipc_sock {
102 struct list_head cong_links; 102 struct list_head cong_links;
103 struct list_head publications; 103 struct list_head publications;
104 u32 pub_count; 104 u32 pub_count;
105 uint conn_timeout;
106 atomic_t dupl_rcvcnt; 105 atomic_t dupl_rcvcnt;
106 u16 conn_timeout;
107 bool probe_unacked; 107 bool probe_unacked;
108 u16 cong_link_cnt; 108 u16 cong_link_cnt;
109 u16 snt_unacked; 109 u16 snt_unacked;
@@ -234,6 +234,7 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
234 */ 234 */
235static void tsk_advance_rx_queue(struct sock *sk) 235static void tsk_advance_rx_queue(struct sock *sk)
236{ 236{
237 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 238 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
238} 239}
239 240
@@ -248,6 +249,7 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
248 if (!tipc_msg_reverse(onode, &skb, err)) 249 if (!tipc_msg_reverse(onode, &skb, err))
249 return; 250 return;
250 251
252 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
251 dnode = msg_destnode(buf_msg(skb)); 253 dnode = msg_destnode(buf_msg(skb));
252 selector = msg_origport(buf_msg(skb)); 254 selector = msg_origport(buf_msg(skb));
253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 255 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
@@ -483,6 +485,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
483 tsk_set_unreliable(tsk, true); 485 tsk_set_unreliable(tsk, true);
484 } 486 }
485 487
488 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
486 return 0; 489 return 0;
487} 490}
488 491
@@ -507,6 +510,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
507 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 510 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
508 !tsk_conn_cong(tsk))); 511 !tsk_conn_cong(tsk)));
509 512
513 /* Remove any pending SYN message */
514 __skb_queue_purge(&sk->sk_write_queue);
515
510 /* Reject all unreceived messages, except on an active connection 516 /* Reject all unreceived messages, except on an active connection
511 * (which disconnects locally & sends a 'FIN+' to peer). 517 * (which disconnects locally & sends a 'FIN+' to peer).
512 */ 518 */
@@ -569,6 +575,7 @@ static int tipc_release(struct socket *sock)
569 tsk = tipc_sk(sk); 575 tsk = tipc_sk(sk);
570 lock_sock(sk); 576 lock_sock(sk);
571 577
578 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
572 __tipc_shutdown(sock, TIPC_ERR_NO_PORT); 579 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
573 sk->sk_shutdown = SHUTDOWN_MASK; 580 sk->sk_shutdown = SHUTDOWN_MASK;
574 tipc_sk_leave(tsk); 581 tipc_sk_leave(tsk);
@@ -715,7 +722,8 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
715 struct tipc_sock *tsk = tipc_sk(sk); 722 struct tipc_sock *tsk = tipc_sk(sk);
716 __poll_t revents = 0; 723 __poll_t revents = 0;
717 724
718 sock_poll_wait(file, wait); 725 sock_poll_wait(file, sock, wait);
726 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
719 727
720 if (sk->sk_shutdown & RCV_SHUTDOWN) 728 if (sk->sk_shutdown & RCV_SHUTDOWN)
721 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 729 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -802,9 +810,12 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
802 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 810 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
803 811
804 /* Send message if build was successful */ 812 /* Send message if build was successful */
805 if (unlikely(rc == dlen)) 813 if (unlikely(rc == dlen)) {
814 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
815 TIPC_DUMP_SK_SNDQ, " ");
806 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 816 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
807 &tsk->cong_link_cnt); 817 &tsk->cong_link_cnt);
818 }
808 819
809 tipc_nlist_purge(&dsts); 820 tipc_nlist_purge(&dsts);
810 821
@@ -878,7 +889,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
878 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 889 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
879 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 890 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
880 struct tipc_sock *tsk = tipc_sk(sk); 891 struct tipc_sock *tsk = tipc_sk(sk);
881 struct tipc_group *grp = tsk->group;
882 struct net *net = sock_net(sk); 892 struct net *net = sock_net(sk);
883 struct tipc_member *mb = NULL; 893 struct tipc_member *mb = NULL;
884 u32 node, port; 894 u32 node, port;
@@ -892,7 +902,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
892 /* Block or return if destination link or member is congested */ 902 /* Block or return if destination link or member is congested */
893 rc = tipc_wait_for_cond(sock, &timeout, 903 rc = tipc_wait_for_cond(sock, &timeout,
894 !tipc_dest_find(&tsk->cong_links, node, 0) && 904 !tipc_dest_find(&tsk->cong_links, node, 0) &&
895 !tipc_group_cong(grp, node, port, blks, &mb)); 905 tsk->group &&
906 !tipc_group_cong(tsk->group, node, port, blks,
907 &mb));
896 if (unlikely(rc)) 908 if (unlikely(rc))
897 return rc; 909 return rc;
898 910
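
The reason for dropping the cached grp local and reading tsk->group inside the condition is that tipc_wait_for_cond() may release the socket lock and sleep; by the time the condition is re-evaluated the group may have been left or destroyed, so only the current pointer is safe to use. A contrived userspace sketch of the same rule (re-read shared state after every wait instead of caching it beforehand; all names are illustrative):

#include <stddef.h>
#include <stdio.h>

struct group { int id; };

static struct group *cur_group;         /* may be cleared while we "sleep" */

/* Re-evaluating cur_group on every pass (as the condition expression now
 * does with tsk->group) notices that the group disappeared; a pointer
 * cached before the loop would be used after it went away.
 */
int main(void)
{
        struct group g = { .id = 42 };

        cur_group = &g;
        for (int pass = 0; pass < 2; pass++) {
                if (cur_group)
                        printf("pass %d: group %d still joined\n",
                               pass, cur_group->id);
                else
                        printf("pass %d: group gone, abort send\n", pass);
                cur_group = NULL;       /* simulate a leave during the wait */
        }
        return 0;
}
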
@@ -922,7 +934,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
922 struct tipc_sock *tsk = tipc_sk(sk); 934 struct tipc_sock *tsk = tipc_sk(sk);
923 struct list_head *cong_links = &tsk->cong_links; 935 struct list_head *cong_links = &tsk->cong_links;
924 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 936 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
925 struct tipc_group *grp = tsk->group;
926 struct tipc_msg *hdr = &tsk->phdr; 937 struct tipc_msg *hdr = &tsk->phdr;
927 struct tipc_member *first = NULL; 938 struct tipc_member *first = NULL;
928 struct tipc_member *mbr = NULL; 939 struct tipc_member *mbr = NULL;
@@ -939,9 +950,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
939 type = msg_nametype(hdr); 950 type = msg_nametype(hdr);
940 inst = dest->addr.name.name.instance; 951 inst = dest->addr.name.name.instance;
941 scope = msg_lookup_scope(hdr); 952 scope = msg_lookup_scope(hdr);
942 exclude = tipc_group_exclude(grp);
943 953
944 while (++lookups < 4) { 954 while (++lookups < 4) {
955 exclude = tipc_group_exclude(tsk->group);
956
945 first = NULL; 957 first = NULL;
946 958
947 /* Look for a non-congested destination member, if any */ 959 /* Look for a non-congested destination member, if any */
@@ -950,7 +962,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
950 &dstcnt, exclude, false)) 962 &dstcnt, exclude, false))
951 return -EHOSTUNREACH; 963 return -EHOSTUNREACH;
952 tipc_dest_pop(&dsts, &node, &port); 964 tipc_dest_pop(&dsts, &node, &port);
953 cong = tipc_group_cong(grp, node, port, blks, &mbr); 965 cong = tipc_group_cong(tsk->group, node, port, blks,
966 &mbr);
954 if (!cong) 967 if (!cong)
955 break; 968 break;
956 if (mbr == first) 969 if (mbr == first)
@@ -969,7 +982,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
969 /* Block or return if destination link or member is congested */ 982 /* Block or return if destination link or member is congested */
970 rc = tipc_wait_for_cond(sock, &timeout, 983 rc = tipc_wait_for_cond(sock, &timeout,
971 !tipc_dest_find(cong_links, node, 0) && 984 !tipc_dest_find(cong_links, node, 0) &&
972 !tipc_group_cong(grp, node, port, 985 tsk->group &&
986 !tipc_group_cong(tsk->group, node, port,
973 blks, &mbr)); 987 blks, &mbr));
974 if (unlikely(rc)) 988 if (unlikely(rc))
975 return rc; 989 return rc;
@@ -1004,8 +1018,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1004 struct sock *sk = sock->sk; 1018 struct sock *sk = sock->sk;
1005 struct net *net = sock_net(sk); 1019 struct net *net = sock_net(sk);
1006 struct tipc_sock *tsk = tipc_sk(sk); 1020 struct tipc_sock *tsk = tipc_sk(sk);
1007 struct tipc_group *grp = tsk->group; 1021 struct tipc_nlist *dsts;
1008 struct tipc_nlist *dsts = tipc_group_dests(grp);
1009 struct tipc_mc_method *method = &tsk->mc_method; 1022 struct tipc_mc_method *method = &tsk->mc_method;
1010 bool ack = method->mandatory && method->rcast; 1023 bool ack = method->mandatory && method->rcast;
1011 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1024 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1014,15 +1027,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1014 struct sk_buff_head pkts; 1027 struct sk_buff_head pkts;
1015 int rc = -EHOSTUNREACH; 1028 int rc = -EHOSTUNREACH;
1016 1029
1017 if (!dsts->local && !dsts->remote)
1018 return -EHOSTUNREACH;
1019
1020 /* Block or return if any destination link or member is congested */ 1030 /* Block or return if any destination link or member is congested */
1021 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1031 rc = tipc_wait_for_cond(sock, &timeout,
1022 !tipc_group_bc_cong(grp, blks)); 1032 !tsk->cong_link_cnt && tsk->group &&
1033 !tipc_group_bc_cong(tsk->group, blks));
1023 if (unlikely(rc)) 1034 if (unlikely(rc))
1024 return rc; 1035 return rc;
1025 1036
1037 dsts = tipc_group_dests(tsk->group);
1038 if (!dsts->local && !dsts->remote)
1039 return -EHOSTUNREACH;
1040
1026 /* Complete message header */ 1041 /* Complete message header */
1027 if (dest) { 1042 if (dest) {
1028 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1043 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1034,7 +1049,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1034 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1049 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1035 msg_set_destport(hdr, 0); 1050 msg_set_destport(hdr, 0);
1036 msg_set_destnode(hdr, 0); 1051 msg_set_destnode(hdr, 0);
1037 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1052 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1038 1053
1039 /* Avoid getting stuck with repeated forced replicasts */ 1054 /* Avoid getting stuck with repeated forced replicasts */
1040 msg_set_grp_bc_ack_req(hdr, ack); 1055 msg_set_grp_bc_ack_req(hdr, ack);
@@ -1206,8 +1221,10 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1206 bool conn_cong; 1221 bool conn_cong;
1207 1222
1208 /* Ignore if connection cannot be validated: */ 1223 /* Ignore if connection cannot be validated: */
1209 if (!tsk_peer_msg(tsk, hdr)) 1224 if (!tsk_peer_msg(tsk, hdr)) {
1225 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1210 goto exit; 1226 goto exit;
1227 }
1211 1228
1212 if (unlikely(msg_errcode(hdr))) { 1229 if (unlikely(msg_errcode(hdr))) {
1213 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1230 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
@@ -1329,6 +1346,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1329 tsk->conn_type = dest->addr.name.name.type; 1346 tsk->conn_type = dest->addr.name.name.type;
1330 tsk->conn_instance = dest->addr.name.name.instance; 1347 tsk->conn_instance = dest->addr.name.name.instance;
1331 } 1348 }
1349 msg_set_syn(hdr, 1);
1332 } 1350 }
1333 1351
1334 seq = &dest->addr.nameseq; 1352 seq = &dest->addr.nameseq;
@@ -1371,7 +1389,10 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1371 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1389 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1372 if (unlikely(rc != dlen)) 1390 if (unlikely(rc != dlen))
1373 return rc; 1391 return rc;
1392 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
1393 return -ENOMEM;
1374 1394
1395 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1375 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1396 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1376 if (unlikely(rc == -ELINKCONG)) { 1397 if (unlikely(rc == -ELINKCONG)) {
1377 tipc_dest_push(clinks, dnode, 0); 1398 tipc_dest_push(clinks, dnode, 0);
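
Cloning the first setup packet into sk_write_queue gives the socket a retransmittable copy of its SYN: the timer resends it while the socket remains in TIPC_CONNECTING (tipc_sk_retry_connect() further down), and the copy is purged once the connection completes (tipc_sk_finish_conn()) or the socket shuts down. A hedged userspace sketch of that bookkeeping; stash_syn()/drop_syn() are illustrative names, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pending_syn {
        char *copy;             /* retransmittable duplicate of the SYN */
        size_t len;
};

static int stash_syn(struct pending_syn *p, const char *pkt, size_t len)
{
        p->copy = malloc(len);
        if (!p->copy)
                return -1;      /* the kernel code returns -ENOMEM here */
        memcpy(p->copy, pkt, len);
        p->len = len;
        return 0;
}

static void drop_syn(struct pending_syn *p)     /* on connect or shutdown */
{
        free(p->copy);
        p->copy = NULL;
        p->len = 0;
}

int main(void)
{
        struct pending_syn p = { 0 };

        if (stash_syn(&p, "SYN", 3) == 0)
                printf("kept a %zu byte SYN for possible retransmit\n", p.len);
        drop_syn(&p);
        return 0;
}
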
@@ -1449,6 +1470,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1449 if (unlikely(rc != send)) 1470 if (unlikely(rc != send))
1450 break; 1471 break;
1451 1472
1473 trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1474 TIPC_DUMP_SK_SNDQ, " ");
1452 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1475 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1453 if (unlikely(rc == -ELINKCONG)) { 1476 if (unlikely(rc == -ELINKCONG)) {
1454 tsk->cong_link_cnt = 1; 1477 tsk->cong_link_cnt = 1;
@@ -1490,6 +1513,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1490 struct net *net = sock_net(sk); 1513 struct net *net = sock_net(sk);
1491 struct tipc_msg *msg = &tsk->phdr; 1514 struct tipc_msg *msg = &tsk->phdr;
1492 1515
1516 msg_set_syn(msg, 0);
1493 msg_set_destnode(msg, peer_node); 1517 msg_set_destnode(msg, peer_node);
1494 msg_set_destport(msg, peer_port); 1518 msg_set_destport(msg, peer_port);
1495 msg_set_type(msg, TIPC_CONN_MSG); 1519 msg_set_type(msg, TIPC_CONN_MSG);
@@ -1501,6 +1525,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1501 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1525 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1502 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1526 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1503 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1527 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1528 __skb_queue_purge(&sk->sk_write_queue);
1504 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1529 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1505 return; 1530 return;
1506 1531
@@ -1548,16 +1573,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1548/** 1573/**
1549 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1574 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1550 * @m: descriptor for message info 1575 * @m: descriptor for message info
1551 * @msg: received message header 1576 * @skb: received message buffer
1552 * @tsk: TIPC port associated with message 1577 * @tsk: TIPC port associated with message
1553 * 1578 *
1554 * Note: Ancillary data is not captured if not requested by receiver. 1579 * Note: Ancillary data is not captured if not requested by receiver.
1555 * 1580 *
1556 * Returns 0 if successful, otherwise errno 1581 * Returns 0 if successful, otherwise errno
1557 */ 1582 */
1558static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1583static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1559 struct tipc_sock *tsk) 1584 struct tipc_sock *tsk)
1560{ 1585{
1586 struct tipc_msg *msg;
1561 u32 anc_data[3]; 1587 u32 anc_data[3];
1562 u32 err; 1588 u32 err;
1563 u32 dest_type; 1589 u32 dest_type;
@@ -1566,6 +1592,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1566 1592
1567 if (likely(m->msg_controllen == 0)) 1593 if (likely(m->msg_controllen == 0))
1568 return 0; 1594 return 0;
1595 msg = buf_msg(skb);
1569 1596
1570 /* Optionally capture errored message object(s) */ 1597 /* Optionally capture errored message object(s) */
1571 err = msg ? msg_errcode(msg) : 0; 1598 err = msg ? msg_errcode(msg) : 0;
@@ -1576,6 +1603,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1576 if (res) 1603 if (res)
1577 return res; 1604 return res;
1578 if (anc_data[1]) { 1605 if (anc_data[1]) {
1606 if (skb_linearize(skb))
1607 return -ENOMEM;
1608 msg = buf_msg(skb);
1579 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1609 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1580 msg_data(msg)); 1610 msg_data(msg));
1581 if (res) 1611 if (res)
@@ -1737,9 +1767,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1737 1767
1738 /* Collect msg meta data, including error code and rejected data */ 1768 /* Collect msg meta data, including error code and rejected data */
1739 tipc_sk_set_orig_addr(m, skb); 1769 tipc_sk_set_orig_addr(m, skb);
1740 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1770 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1741 if (unlikely(rc)) 1771 if (unlikely(rc))
1742 goto exit; 1772 goto exit;
1773 hdr = buf_msg(skb);
1743 1774
1744 /* Capture data if non-error msg, otherwise just set return value */ 1775 /* Capture data if non-error msg, otherwise just set return value */
1745 if (likely(!err)) { 1776 if (likely(!err)) {
@@ -1849,9 +1880,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1849 /* Collect msg meta data, incl. error code and rejected data */ 1880 /* Collect msg meta data, incl. error code and rejected data */
1850 if (!copied) { 1881 if (!copied) {
1851 tipc_sk_set_orig_addr(m, skb); 1882 tipc_sk_set_orig_addr(m, skb);
1852 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1883 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1853 if (rc) 1884 if (rc)
1854 break; 1885 break;
1886 hdr = buf_msg(skb);
1855 } 1887 }
1856 1888
1857 /* Copy data if msg ok, otherwise return error/partial data */ 1889 /* Copy data if msg ok, otherwise return error/partial data */
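
Passing the skb instead of a pre-computed header pointer matters because tipc_sk_anc_data_recv() may now linearize the buffer, which can reallocate its data; that is also why both receive paths refresh hdr with buf_msg(skb) after the call. The realloc() analogue below illustrates the rule in a simplified userspace form (not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *data = malloc(8);
        char *hdr, *bigger;

        if (!data)
                return 1;
        strcpy(data, "hdr+msg");
        hdr = data;                     /* alias into the old buffer */

        bigger = realloc(data, 4096);   /* may move the bytes... */
        if (!bigger) {
                free(data);
                return 1;
        }
        data = bigger;
        hdr = data;                     /* ...so the alias must be refreshed */

        printf("%s\n", hdr);
        free(data);
        return 0;
}
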
@@ -1971,91 +2003,90 @@ static void tipc_sk_proto_rcv(struct sock *sk,
1971} 2003}
1972 2004
1973/** 2005/**
1974 * tipc_filter_connect - Handle incoming message for a connection-based socket 2006 * tipc_sk_filter_connect - check incoming message for a connection-based socket
1975 * @tsk: TIPC socket 2007 * @tsk: TIPC socket
1976 * @skb: pointer to message buffer. Set to NULL if buffer is consumed 2008 * @skb: pointer to message buffer.
1977 * 2009 * Returns true if message should be added to receive queue, false otherwise
1978 * Returns true if everything ok, false otherwise
1979 */ 2010 */
1980static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 2011static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1981{ 2012{
1982 struct sock *sk = &tsk->sk; 2013 struct sock *sk = &tsk->sk;
1983 struct net *net = sock_net(sk); 2014 struct net *net = sock_net(sk);
1984 struct tipc_msg *hdr = buf_msg(skb); 2015 struct tipc_msg *hdr = buf_msg(skb);
1985 u32 pport = msg_origport(hdr); 2016 bool con_msg = msg_connected(hdr);
1986 u32 pnode = msg_orignode(hdr); 2017 u32 pport = tsk_peer_port(tsk);
2018 u32 pnode = tsk_peer_node(tsk);
2019 u32 oport = msg_origport(hdr);
2020 u32 onode = msg_orignode(hdr);
2021 int err = msg_errcode(hdr);
2022 unsigned long delay;
1987 2023
1988 if (unlikely(msg_mcast(hdr))) 2024 if (unlikely(msg_mcast(hdr)))
1989 return false; 2025 return false;
1990 2026
1991 switch (sk->sk_state) { 2027 switch (sk->sk_state) {
1992 case TIPC_CONNECTING: 2028 case TIPC_CONNECTING:
1993 /* Accept only ACK or NACK message */ 2029 /* Setup ACK */
1994 if (unlikely(!msg_connected(hdr))) { 2030 if (likely(con_msg)) {
1995 if (pport != tsk_peer_port(tsk) || 2031 if (err)
1996 pnode != tsk_peer_node(tsk)) 2032 break;
1997 return false; 2033 tipc_sk_finish_conn(tsk, oport, onode);
1998 2034 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1999 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2035 /* ACK+ message with data is added to receive queue */
2000 sk->sk_err = ECONNREFUSED; 2036 if (msg_data_sz(hdr))
2001 sk->sk_state_change(sk); 2037 return true;
2002 return true; 2038 /* Empty ACK-, - wake up sleeping connect() and drop */
2003 } 2039 sk->sk_data_ready(sk);
2004 2040 msg_set_dest_droppable(hdr, 1);
2005 if (unlikely(msg_errcode(hdr))) { 2041 return false;
2006 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2007 sk->sk_err = ECONNREFUSED;
2008 sk->sk_state_change(sk);
2009 return true;
2010 }
2011
2012 if (unlikely(!msg_isdata(hdr))) {
2013 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2014 sk->sk_err = EINVAL;
2015 sk->sk_state_change(sk);
2016 return true;
2017 } 2042 }
2043 /* Ignore connectionless message if not from listening socket */
2044 if (oport != pport || onode != pnode)
2045 return false;
2018 2046
2019 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2047 /* Rejected SYN */
2020 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2048 if (err != TIPC_ERR_OVERLOAD)
2021 2049 break;
2022 /* If 'ACK+' message, add to socket receive queue */
2023 if (msg_data_sz(hdr))
2024 return true;
2025
2026 /* If empty 'ACK-' message, wake up sleeping connect() */
2027 sk->sk_data_ready(sk);
2028 2050
2029 /* 'ACK-' message is neither accepted nor rejected: */ 2051 /* Prepare for new setup attempt if we have a SYN clone */
2030 msg_set_dest_droppable(hdr, 1); 2052 if (skb_queue_empty(&sk->sk_write_queue))
2053 break;
2054 get_random_bytes(&delay, 2);
2055 delay %= (tsk->conn_timeout / 4);
2056 delay = msecs_to_jiffies(delay + 100);
2057 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2031 return false; 2058 return false;
2032
2033 case TIPC_OPEN: 2059 case TIPC_OPEN:
2034 case TIPC_DISCONNECTING: 2060 case TIPC_DISCONNECTING:
2035 break; 2061 return false;
2036 case TIPC_LISTEN: 2062 case TIPC_LISTEN:
2037 /* Accept only SYN message */ 2063 /* Accept only SYN message */
2038 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2064 if (!msg_is_syn(hdr) &&
2065 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2066 return false;
2067 if (!con_msg && !err)
2039 return true; 2068 return true;
2040 break; 2069 return false;
2041 case TIPC_ESTABLISHED: 2070 case TIPC_ESTABLISHED:
2042 /* Accept only connection-based messages sent by peer */ 2071 /* Accept only connection-based messages sent by peer */
2043 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2072 if (likely(con_msg && !err && pport == oport && pnode == onode))
2073 return true;
2074 if (!tsk_peer_msg(tsk, hdr))
2044 return false; 2075 return false;
2045 2076 if (!err)
2046 if (unlikely(msg_errcode(hdr))) { 2077 return true;
2047 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2078 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2048 /* Let timer expire on it's own */ 2079 tipc_node_remove_conn(net, pnode, tsk->portid);
2049 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2080 sk->sk_state_change(sk);
2050 tsk->portid);
2051 sk->sk_state_change(sk);
2052 }
2053 return true; 2081 return true;
2054 default: 2082 default:
2055 pr_err("Unknown sk_state %u\n", sk->sk_state); 2083 pr_err("Unknown sk_state %u\n", sk->sk_state);
2056 } 2084 }
2057 2085 /* Abort connection setup attempt */
2058 return false; 2086 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2087 sk->sk_err = ECONNREFUSED;
2088 sk->sk_state_change(sk);
2089 return true;
2059} 2090}
2060 2091
2061/** 2092/**
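
For the TIPC_ERR_OVERLOAD case above: when a rejected SYN still has a clone sitting in sk_write_queue, the socket arms its timer with a small randomized back-off instead of failing the connect. With the default connect timeout of 8000 ms the delay lands in the range [100, 2099] ms, i.e. random % (conn_timeout / 4) + 100. A tiny sketch of that arithmetic, with rand()/plain milliseconds standing in for get_random_bytes() and msecs_to_jiffies():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        unsigned int conn_timeout = 8000;       /* CONN_TIMEOUT_DEFAULT, ms */
        unsigned int delay;

        srand((unsigned int)time(NULL));        /* stand-in for get_random_bytes() */
        delay = rand() % (conn_timeout / 4);    /* 0..1999 */
        delay += 100;                           /* 100..2099 ms before next SYN */

        printf("retry SYN in %u ms\n", delay);
        return 0;
}
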
@@ -2115,6 +2146,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2115 struct sk_buff_head inputq; 2146 struct sk_buff_head inputq;
2116 int limit, err = TIPC_OK; 2147 int limit, err = TIPC_OK;
2117 2148
2149 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2118 TIPC_SKB_CB(skb)->bytes_read = 0; 2150 TIPC_SKB_CB(skb)->bytes_read = 0;
2119 __skb_queue_head_init(&inputq); 2151 __skb_queue_head_init(&inputq);
2120 __skb_queue_tail(&inputq, skb); 2152 __skb_queue_tail(&inputq, skb);
@@ -2134,17 +2166,25 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2134 (!grp && msg_in_group(hdr))) 2166 (!grp && msg_in_group(hdr)))
2135 err = TIPC_ERR_NO_PORT; 2167 err = TIPC_ERR_NO_PORT;
2136 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2168 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2169 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2170 "err_overload2!");
2137 atomic_inc(&sk->sk_drops); 2171 atomic_inc(&sk->sk_drops);
2138 err = TIPC_ERR_OVERLOAD; 2172 err = TIPC_ERR_OVERLOAD;
2139 } 2173 }
2140 2174
2141 if (unlikely(err)) { 2175 if (unlikely(err)) {
2142 tipc_skb_reject(net, err, skb, xmitq); 2176 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2177 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2178 "@filter_rcv!");
2179 __skb_queue_tail(xmitq, skb);
2180 }
2143 err = TIPC_OK; 2181 err = TIPC_OK;
2144 continue; 2182 continue;
2145 } 2183 }
2146 __skb_queue_tail(&sk->sk_receive_queue, skb); 2184 __skb_queue_tail(&sk->sk_receive_queue, skb);
2147 skb_set_owner_r(skb, sk); 2185 skb_set_owner_r(skb, sk);
2186 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2187 "rcvq >90% allocated!");
2148 sk->sk_data_ready(sk); 2188 sk->sk_data_ready(sk);
2149 } 2189 }
2150} 2190}
@@ -2210,14 +2250,21 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2210 if (!sk->sk_backlog.len) 2250 if (!sk->sk_backlog.len)
2211 atomic_set(dcnt, 0); 2251 atomic_set(dcnt, 0);
2212 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2252 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2213 if (likely(!sk_add_backlog(sk, skb, lim))) 2253 if (likely(!sk_add_backlog(sk, skb, lim))) {
2254 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2255 "bklg & rcvq >90% allocated!");
2214 continue; 2256 continue;
2257 }
2215 2258
2259 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2216 /* Overload => reject message back to sender */ 2260 /* Overload => reject message back to sender */
2217 onode = tipc_own_addr(sock_net(sk)); 2261 onode = tipc_own_addr(sock_net(sk));
2218 atomic_inc(&sk->sk_drops); 2262 atomic_inc(&sk->sk_drops);
2219 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2263 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2264 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2265 "@sk_enqueue!");
2220 __skb_queue_tail(xmitq, skb); 2266 __skb_queue_tail(xmitq, skb);
2267 }
2221 break; 2268 break;
2222 } 2269 }
2223} 2270}
@@ -2266,6 +2313,8 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2266 /* Prepare for message rejection */ 2313 /* Prepare for message rejection */
2267 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2314 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2268 continue; 2315 continue;
2316
2317 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2269xmit: 2318xmit:
2270 dnode = msg_destnode(buf_msg(skb)); 2319 dnode = msg_destnode(buf_msg(skb));
2271 tipc_node_xmit_skb(net, skb, dnode, dport); 2320 tipc_node_xmit_skb(net, skb, dnode, dport);
@@ -2539,6 +2588,7 @@ static int tipc_shutdown(struct socket *sock, int how)
2539 2588
2540 lock_sock(sk); 2589 lock_sock(sk);
2541 2590
2591 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2542 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2592 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2543 sk->sk_shutdown = SEND_SHUTDOWN; 2593 sk->sk_shutdown = SEND_SHUTDOWN;
2544 2594
@@ -2557,43 +2607,78 @@ static int tipc_shutdown(struct socket *sock, int how)
2557 return res; 2607 return res;
2558} 2608}
2559 2609
2610static void tipc_sk_check_probing_state(struct sock *sk,
2611 struct sk_buff_head *list)
2612{
2613 struct tipc_sock *tsk = tipc_sk(sk);
2614 u32 pnode = tsk_peer_node(tsk);
2615 u32 pport = tsk_peer_port(tsk);
2616 u32 self = tsk_own_node(tsk);
2617 u32 oport = tsk->portid;
2618 struct sk_buff *skb;
2619
2620 if (tsk->probe_unacked) {
2621 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2622 sk->sk_err = ECONNABORTED;
2623 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2624 sk->sk_state_change(sk);
2625 return;
2626 }
2627 /* Prepare new probe */
2628 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2629 pnode, self, pport, oport, TIPC_OK);
2630 if (skb)
2631 __skb_queue_tail(list, skb);
2632 tsk->probe_unacked = true;
2633 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2634}
2635
2636static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2637{
2638 struct tipc_sock *tsk = tipc_sk(sk);
2639
2640 /* Try again later if dest link is congested */
2641 if (tsk->cong_link_cnt) {
2642 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2643 return;
2644 }
2645 /* Prepare SYN for retransmit */
2646 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2647}
2648
2560static void tipc_sk_timeout(struct timer_list *t) 2649static void tipc_sk_timeout(struct timer_list *t)
2561{ 2650{
2562 struct sock *sk = from_timer(sk, t, sk_timer); 2651 struct sock *sk = from_timer(sk, t, sk_timer);
2563 struct tipc_sock *tsk = tipc_sk(sk); 2652 struct tipc_sock *tsk = tipc_sk(sk);
2564 u32 peer_port = tsk_peer_port(tsk); 2653 u32 pnode = tsk_peer_node(tsk);
2565 u32 peer_node = tsk_peer_node(tsk); 2654 struct sk_buff_head list;
2566 u32 own_node = tsk_own_node(tsk); 2655 int rc = 0;
2567 u32 own_port = tsk->portid;
2568 struct net *net = sock_net(sk);
2569 struct sk_buff *skb = NULL;
2570 2656
2657 skb_queue_head_init(&list);
2571 bh_lock_sock(sk); 2658 bh_lock_sock(sk);
2572 if (!tipc_sk_connected(sk))
2573 goto exit;
2574 2659
2575 /* Try again later if socket is busy */ 2660 /* Try again later if socket is busy */
2576 if (sock_owned_by_user(sk)) { 2661 if (sock_owned_by_user(sk)) {
2577 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2662 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2578 goto exit; 2663 bh_unlock_sock(sk);
2664 return;
2579 } 2665 }
2580 2666
2581 if (tsk->probe_unacked) { 2667 if (sk->sk_state == TIPC_ESTABLISHED)
2582 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2668 tipc_sk_check_probing_state(sk, &list);
2583 tipc_node_remove_conn(net, peer_node, peer_port); 2669 else if (sk->sk_state == TIPC_CONNECTING)
2584 sk->sk_state_change(sk); 2670 tipc_sk_retry_connect(sk, &list);
2585 goto exit; 2671
2586 }
2587 /* Send new probe */
2588 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2589 peer_node, own_node, peer_port, own_port,
2590 TIPC_OK);
2591 tsk->probe_unacked = true;
2592 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2593exit:
2594 bh_unlock_sock(sk); 2672 bh_unlock_sock(sk);
2595 if (skb) 2673
2596 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2674 if (!skb_queue_empty(&list))
2675 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2676
2677 /* SYN messages may cause link congestion */
2678 if (rc == -ELINKCONG) {
2679 tipc_dest_push(&tsk->cong_links, pnode, 0);
2680 tsk->cong_link_cnt = 1;
2681 }
2597 sock_put(sk); 2682 sock_put(sk);
2598} 2683}
2599 2684
@@ -2676,11 +2761,15 @@ void tipc_sk_reinit(struct net *net)
2676 rhashtable_walk_start(&iter); 2761 rhashtable_walk_start(&iter);
2677 2762
2678 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2763 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2679 spin_lock_bh(&tsk->sk.sk_lock.slock); 2764 sock_hold(&tsk->sk);
2765 rhashtable_walk_stop(&iter);
2766 lock_sock(&tsk->sk);
2680 msg = &tsk->phdr; 2767 msg = &tsk->phdr;
2681 msg_set_prevnode(msg, tipc_own_addr(net)); 2768 msg_set_prevnode(msg, tipc_own_addr(net));
2682 msg_set_orignode(msg, tipc_own_addr(net)); 2769 msg_set_orignode(msg, tipc_own_addr(net));
2683 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2770 release_sock(&tsk->sk);
2771 rhashtable_walk_start(&iter);
2772 sock_put(&tsk->sk);
2684 } 2773 }
2685 2774
2686 rhashtable_walk_stop(&iter); 2775 rhashtable_walk_stop(&iter);
@@ -3516,3 +3605,187 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3516 3605
3517 return skb->len; 3606 return skb->len;
3518} 3607}
3608
3609/**
3610 * tipc_sk_filtering - check if a socket should be traced
3611 * @sk: the socket to be examined
3612 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3613 * (portid, sock type, name type, name lower, name upper)
3614 *
3615 * Returns true if the socket meets the socket tuple data
3616 * (value 0 = 'any') or when there is no tuple set (all = 0),
3617 * otherwise false
3618 */
3619bool tipc_sk_filtering(struct sock *sk)
3620{
3621 struct tipc_sock *tsk;
3622 struct publication *p;
3623 u32 _port, _sktype, _type, _lower, _upper;
3624 u32 type = 0, lower = 0, upper = 0;
3625
3626 if (!sk)
3627 return true;
3628
3629 tsk = tipc_sk(sk);
3630
3631 _port = sysctl_tipc_sk_filter[0];
3632 _sktype = sysctl_tipc_sk_filter[1];
3633 _type = sysctl_tipc_sk_filter[2];
3634 _lower = sysctl_tipc_sk_filter[3];
3635 _upper = sysctl_tipc_sk_filter[4];
3636
3637 if (!_port && !_sktype && !_type && !_lower && !_upper)
3638 return true;
3639
3640 if (_port)
3641 return (_port == tsk->portid);
3642
3643 if (_sktype && _sktype != sk->sk_type)
3644 return false;
3645
3646 if (tsk->published) {
3647 p = list_first_entry_or_null(&tsk->publications,
3648 struct publication, binding_sock);
3649 if (p) {
3650 type = p->type;
3651 lower = p->lower;
3652 upper = p->upper;
3653 }
3654 }
3655
3656 if (!tipc_sk_type_connectionless(sk)) {
3657 type = tsk->conn_type;
3658 lower = tsk->conn_instance;
3659 upper = tsk->conn_instance;
3660 }
3661
3662 if ((_type && _type != type) || (_lower && _lower != lower) ||
3663 (_upper && _upper != upper))
3664 return false;
3665
3666 return true;
3667}
3668
3669u32 tipc_sock_get_portid(struct sock *sk)
3670{
3671 return (sk) ? (tipc_sk(sk))->portid : 0;
3672}
3673
3674/**
3675 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3676 * both the rcv and backlog queues are considered
3677 * @sk: tipc sk to be checked
3678 * @skb: tipc msg to be checked
3679 *
3680 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3681 */
3682
3683bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3684{
3685 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3686 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3687 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3688
3689 return (qsize > lim * 90 / 100);
3690}
3691
3692/**
3693 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3694 * only the rcv queue is considered
3695 * @sk: tipc sk to be checked
3696 * @skb: tipc msg to be checked
3697 *
3698 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3699 */
3700
3701bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3702{
3703 unsigned int lim = rcvbuf_limit(sk, skb);
3704 unsigned int qsize = sk_rmem_alloc_get(sk);
3705
3706 return (qsize > lim * 90 / 100);
3707}
3708
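
Both helpers apply the same integer-arithmetic threshold: the overlimit tracepoints fire only once the queued bytes exceed nine tenths of the applicable limit. A quick check of that cut-off, using the same expression in plain C:

#include <stdbool.h>
#include <stdio.h>

static bool over_90_percent(unsigned int qsize, unsigned int lim)
{
        return qsize > lim * 90 / 100;
}

int main(void)
{
        printf("%d\n", over_90_percent(9000, 10000));   /* 0: exactly 90% */
        printf("%d\n", over_90_percent(9001, 10000));   /* 1: just above */
        return 0;
}
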
3709/**
3710 * tipc_sk_dump - dump TIPC socket
3711 * @sk: tipc sk to be dumped
3712 * @dqueues: bitmask to decide if any socket queue to be dumped?
3713 * - TIPC_DUMP_NONE: don't dump socket queues
3714 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3715 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3716 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3717 * - TIPC_DUMP_ALL: dump all the socket queues above
3718 * @buf: returned buffer of dump data in format
3719 */
3720int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3721{
3722 int i = 0;
3723 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3724 struct tipc_sock *tsk;
3725 struct publication *p;
3726 bool tsk_connected;
3727
3728 if (!sk) {
3729 i += scnprintf(buf, sz, "sk data: (null)\n");
3730 return i;
3731 }
3732
3733 tsk = tipc_sk(sk);
3734 tsk_connected = !tipc_sk_type_connectionless(sk);
3735
3736 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3737 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3738 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3739 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3740 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3741 if (tsk_connected) {
3742 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3743 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3744 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3745 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3746 }
3747 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3748 if (tsk->published) {
3749 p = list_first_entry_or_null(&tsk->publications,
3750 struct publication, binding_sock);
3751 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3752 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3753 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3754 }
3755 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3756 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3757 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3758 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3759 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3760 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3761 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3762 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3763 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3764 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3765 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3766 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3767 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3768 i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
3769
3770 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3771 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3772 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3773 }
3774
3775 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3776 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3777 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3778 }
3779
3780 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3781 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3782 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3783 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3784 i += scnprintf(buf + i, sz - i, " tail ");
3785 i += tipc_skb_dump(sk->sk_backlog.tail, false,
3786 buf + i);
3787 }
3788 }
3789
3790 return i;
3791}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 5e575f205afe..235b9679acee 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -71,4 +71,8 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
71int tipc_dump_start(struct netlink_callback *cb); 71int tipc_dump_start(struct netlink_callback *cb);
72int __tipc_dump_start(struct netlink_callback *cb, struct net *net); 72int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
73int tipc_dump_done(struct netlink_callback *cb); 73int tipc_dump_done(struct netlink_callback *cb);
74u32 tipc_sock_get_portid(struct sock *sk);
75bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
76bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
77
74#endif 78#endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 1a779b1e8510..3481e4906bd6 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -34,6 +34,7 @@
34 */ 34 */
35 35
36#include "core.h" 36#include "core.h"
37#include "trace.h"
37 38
38#include <linux/sysctl.h> 39#include <linux/sysctl.h>
39 40
@@ -54,6 +55,13 @@ static struct ctl_table tipc_table[] = {
54 .mode = 0644, 55 .mode = 0644,
55 .proc_handler = proc_dointvec, 56 .proc_handler = proc_dointvec,
56 }, 57 },
58 {
59 .procname = "sk_filter",
60 .data = &sysctl_tipc_sk_filter,
61 .maxlen = sizeof(sysctl_tipc_sk_filter),
62 .mode = 0644,
63 .proc_handler = proc_doulongvec_minmax,
64 },
57 {} 65 {}
58}; 66};
59 67
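
The new sk_filter entry exposes the five-element trace-filter tuple (portid, socket type, name type, name lower, name upper) under /proc/sys/net/tipc/. Since proc_doulongvec_minmax() parses whitespace-separated values, the tuple should be settable as in the hedged sketch below; zero means "match any", and the name type 18888 is only an example value:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/tipc/sk_filter", "w");

        if (!f) {
                perror("sk_filter");
                return 1;
        }
        /* trace only sockets bound to (or connected with) name type 18888 */
        fprintf(f, "0 0 18888 0 0\n");
        fclose(f);
        return 0;
}
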
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 2627b5d812e9..efb16f69bd2c 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,16 +57,12 @@
57 * @idr_lock: protect the connection identifier set 57 * @idr_lock: protect the connection identifier set
58 * @idr_in_use: amount of allocated identifier entry 58 * @idr_in_use: amount of allocated identifier entry
 59 * @net: network namespace instance 59
60 * @rcvbuf_cache: memory cache of server receive buffer 60 * @awork: accept work item
61 * @rcv_wq: receive workqueue 61 * @rcv_wq: receive workqueue
62 * @send_wq: send workqueue 62 * @send_wq: send workqueue
63 * @max_rcvbuf_size: maximum permitted receive message length 63 * @max_rcvbuf_size: maximum permitted receive message length
64 * @tipc_conn_new: callback will be called when new connection is incoming 64 * @listener: topsrv listener socket
65 * @tipc_conn_release: callback will be called before releasing the connection
66 * @tipc_conn_recvmsg: callback will be called when message arrives
67 * @name: server name 65 * @name: server name
68 * @imp: message importance
69 * @type: socket type
70 */ 66 */
71struct tipc_topsrv { 67struct tipc_topsrv {
72 struct idr conn_idr; 68 struct idr conn_idr;
@@ -90,9 +86,7 @@ struct tipc_topsrv {
90 * @server: pointer to connected server 86 * @server: pointer to connected server
 91 * @sub_list: list to all pertaining subscriptions 87
92 * @sub_lock: lock protecting the subscription list 88 * @sub_lock: lock protecting the subscription list
93 * @outqueue_lock: control access to the outqueue
94 * @rwork: receive work item 89 * @rwork: receive work item
95 * @rx_action: what to do when connection socket is active
96 * @outqueue: pointer to first outbound message in queue 90 * @outqueue: pointer to first outbound message in queue
97 * @outqueue_lock: control access to the outqueue 91 * @outqueue_lock: control access to the outqueue
98 * @swork: send work item 92 * @swork: send work item
@@ -400,7 +394,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
400 iov.iov_base = &s; 394 iov.iov_base = &s;
401 iov.iov_len = sizeof(s); 395 iov.iov_len = sizeof(s);
402 msg.msg_name = NULL; 396 msg.msg_name = NULL;
403 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len); 397 iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
404 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); 398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
405 if (ret == -EWOULDBLOCK) 399 if (ret == -EWOULDBLOCK)
406 return -EWOULDBLOCK; 400 return -EWOULDBLOCK;
@@ -657,7 +651,7 @@ int tipc_topsrv_start(struct net *net)
657 srv->max_rcvbuf_size = sizeof(struct tipc_subscr); 651 srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
658 INIT_WORK(&srv->awork, tipc_topsrv_accept); 652 INIT_WORK(&srv->awork, tipc_topsrv_accept);
659 653
660 strncpy(srv->name, name, strlen(name) + 1); 654 strscpy(srv->name, name, sizeof(srv->name));
661 tn->topsrv = srv; 655 tn->topsrv = srv;
662 atomic_set(&tn->subscription_count, 0); 656 atomic_set(&tn->subscription_count, 0);
663 657
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
new file mode 100644
index 000000000000..964823841efe
--- /dev/null
+++ b/net/tipc/trace.c
@@ -0,0 +1,206 @@
1/*
2 * net/tipc/trace.c: TIPC tracepoints code
3 *
4 * Copyright (c) 2018, Ericsson AB
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#define CREATE_TRACE_POINTS
37#include "trace.h"
38
39/**
40 * socket tuples for filtering in socket traces:
41 * (portid, sock type, name type, name lower, name upper)
42 */
43unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
44
45/**
46 * tipc_skb_dump - dump TIPC skb data
47 * @skb: skb to be dumped
48 * @more: dump more?
49 * - false: dump only tipc msg data
50 * - true: dump kernel-related skb data and tipc cb[] array as well
51 * @buf: returned buffer of dump data in format
52 */
53int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
54{
55 int i = 0;
56 size_t sz = (more) ? SKB_LMAX : SKB_LMIN;
57 struct tipc_msg *hdr;
58 struct tipc_skb_cb *skbcb;
59
60 if (!skb) {
61 i += scnprintf(buf, sz, "msg: (null)\n");
62 return i;
63 }
64
65 hdr = buf_msg(skb);
66 skbcb = TIPC_SKB_CB(skb);
67
68 /* tipc msg data section */
69 i += scnprintf(buf, sz, "msg: %u", msg_user(hdr));
70 i += scnprintf(buf + i, sz - i, " %u", msg_type(hdr));
71 i += scnprintf(buf + i, sz - i, " %u", msg_hdr_sz(hdr));
72 i += scnprintf(buf + i, sz - i, " %u", msg_data_sz(hdr));
73 i += scnprintf(buf + i, sz - i, " %x", msg_orignode(hdr));
74 i += scnprintf(buf + i, sz - i, " %x", msg_destnode(hdr));
75 i += scnprintf(buf + i, sz - i, " %u", msg_seqno(hdr));
76 i += scnprintf(buf + i, sz - i, " %u", msg_ack(hdr));
77 i += scnprintf(buf + i, sz - i, " %u", msg_bcast_ack(hdr));
78 switch (msg_user(hdr)) {
79 case LINK_PROTOCOL:
80 i += scnprintf(buf + i, sz - i, " %c", msg_net_plane(hdr));
81 i += scnprintf(buf + i, sz - i, " %u", msg_probe(hdr));
82 i += scnprintf(buf + i, sz - i, " %u", msg_peer_stopping(hdr));
83 i += scnprintf(buf + i, sz - i, " %u", msg_session(hdr));
84 i += scnprintf(buf + i, sz - i, " %u", msg_next_sent(hdr));
85 i += scnprintf(buf + i, sz - i, " %u", msg_seq_gap(hdr));
86 i += scnprintf(buf + i, sz - i, " %u", msg_bc_snd_nxt(hdr));
87 i += scnprintf(buf + i, sz - i, " %u", msg_bc_gap(hdr));
88 break;
89 case TIPC_LOW_IMPORTANCE:
90 case TIPC_MEDIUM_IMPORTANCE:
91 case TIPC_HIGH_IMPORTANCE:
92 case TIPC_CRITICAL_IMPORTANCE:
93 case CONN_MANAGER:
94 case SOCK_WAKEUP:
95 i += scnprintf(buf + i, sz - i, " | %u", msg_origport(hdr));
96 i += scnprintf(buf + i, sz - i, " %u", msg_destport(hdr));
97 switch (msg_type(hdr)) {
98 case TIPC_NAMED_MSG:
99 i += scnprintf(buf + i, sz - i, " %u",
100 msg_nametype(hdr));
101 i += scnprintf(buf + i, sz - i, " %u",
102 msg_nameinst(hdr));
103 break;
104 case TIPC_MCAST_MSG:
105 i += scnprintf(buf + i, sz - i, " %u",
106 msg_nametype(hdr));
107 i += scnprintf(buf + i, sz - i, " %u",
108 msg_namelower(hdr));
109 i += scnprintf(buf + i, sz - i, " %u",
110 msg_nameupper(hdr));
111 break;
112 default:
113 break;
114 };
115 i += scnprintf(buf + i, sz - i, " | %u",
116 msg_src_droppable(hdr));
117 i += scnprintf(buf + i, sz - i, " %u",
118 msg_dest_droppable(hdr));
119 i += scnprintf(buf + i, sz - i, " %u", msg_errcode(hdr));
120 i += scnprintf(buf + i, sz - i, " %u", msg_reroute_cnt(hdr));
121 break;
122 default:
123 /* need more? */
124 break;
125 };
126
127 i += scnprintf(buf + i, sz - i, "\n");
128 if (!more)
129 return i;
130
131 /* kernel-related skb data section */
132 i += scnprintf(buf + i, sz - i, "skb: %s",
133 (skb->dev) ? skb->dev->name : "n/a");
134 i += scnprintf(buf + i, sz - i, " %u", skb->len);
135 i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
136 i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
137 i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
138 i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
139 i += scnprintf(buf + i, sz - i, " %p", skb->sk);
140 i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
141 i += scnprintf(buf + i, sz - i, " %llx",
142 ktime_to_ms(skb_get_ktime(skb)));
143 i += scnprintf(buf + i, sz - i, " %llx\n",
144 ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
145
146 /* tipc skb cb[] data section */
147 i += scnprintf(buf + i, sz - i, "cb[]: %u", skbcb->bytes_read);
148 i += scnprintf(buf + i, sz - i, " %u", skbcb->orig_member);
149 i += scnprintf(buf + i, sz - i, " %u",
150 jiffies_to_msecs(skbcb->nxt_retr));
151 i += scnprintf(buf + i, sz - i, " %u", skbcb->validated);
152 i += scnprintf(buf + i, sz - i, " %u", skbcb->chain_imp);
153 i += scnprintf(buf + i, sz - i, " %u\n", skbcb->ackers);
154
155 return i;
156}
157
158/**
159 * tipc_list_dump - dump TIPC skb list/queue
160 * @list: list of skbs to be dumped
161 * @more: dump more?
162 * - false: dump only the head & tail skbs
163 * - true: dump the first & last 5 skbs
164 * @buf: returned buffer of dump data in format
165 */
166int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf)
167{
168 int i = 0;
169 size_t sz = (more) ? LIST_LMAX : LIST_LMIN;
170 u32 count, len;
171 struct sk_buff *hskb, *tskb, *skb, *tmp;
172
173 if (!list) {
174 i += scnprintf(buf, sz, "(null)\n");
175 return i;
176 }
177
178 len = skb_queue_len(list);
179 i += scnprintf(buf, sz, "len = %d\n", len);
180
181 if (!len)
182 return i;
183
184 if (!more) {
185 hskb = skb_peek(list);
186 i += scnprintf(buf + i, sz - i, " head ");
187 i += tipc_skb_dump(hskb, false, buf + i);
188 if (len > 1) {
189 tskb = skb_peek_tail(list);
190 i += scnprintf(buf + i, sz - i, " tail ");
191 i += tipc_skb_dump(tskb, false, buf + i);
192 }
193 } else {
194 count = 0;
195 skb_queue_walk_safe(list, skb, tmp) {
196 count++;
197 if (count == 6)
198 i += scnprintf(buf + i, sz - i, " .\n .\n");
199 if (count > 5 && count <= len - 5)
200 continue;
201 i += scnprintf(buf + i, sz - i, " #%d ", count);
202 i += tipc_skb_dump(skb, false, buf + i);
203 }
204 }
205 return i;
206}
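
For the more == true branch of tipc_list_dump() above, only the first and last five buffers of a long queue are dumped, with dot markers for the elided middle. The loop shape is easy to verify in isolation; a plain counter stands in for the skb walk in this sketch:

#include <stdio.h>

int main(void)
{
        int len = 12;   /* pretend the queue holds 12 skbs */
        int count = 0;

        for (int n = 0; n < len; n++) {
                count++;
                if (count == 6)
                        printf(" .\n .\n");
                if (count > 5 && count <= len - 5)
                        continue;       /* skip the middle of the queue */
                printf(" #%d\n", count);
        }
        return 0;
}
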
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
new file mode 100644
index 000000000000..4d8e00483afc
--- /dev/null
+++ b/net/tipc/trace.h
@@ -0,0 +1,431 @@
1/*
2 * net/tipc/trace.h: TIPC tracepoints
3 *
4 * Copyright (c) 2018, Ericsson AB
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
22 *
 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#undef TRACE_SYSTEM
37#define TRACE_SYSTEM tipc
38
39#if !defined(_TIPC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
40#define _TIPC_TRACE_H
41
42#include <linux/tracepoint.h>
43#include "core.h"
44#include "link.h"
45#include "socket.h"
46#include "node.h"
47
48#define SKB_LMIN (100)
49#define SKB_LMAX (SKB_LMIN * 2)
50#define LIST_LMIN (SKB_LMIN * 3)
51#define LIST_LMAX (SKB_LMIN * 11)
52#define SK_LMIN (SKB_LMIN * 2)
53#define SK_LMAX (SKB_LMIN * 11)
54#define LINK_LMIN (SKB_LMIN)
55#define LINK_LMAX (SKB_LMIN * 16)
56#define NODE_LMIN (SKB_LMIN)
57#define NODE_LMAX (SKB_LMIN * 11)
58
59#ifndef __TIPC_TRACE_ENUM
60#define __TIPC_TRACE_ENUM
61enum {
62 TIPC_DUMP_NONE = 0,
63
64 TIPC_DUMP_TRANSMQ = 1,
65 TIPC_DUMP_BACKLOGQ = (1 << 1),
66 TIPC_DUMP_DEFERDQ = (1 << 2),
67 TIPC_DUMP_INPUTQ = (1 << 3),
68 TIPC_DUMP_WAKEUP = (1 << 4),
69
70 TIPC_DUMP_SK_SNDQ = (1 << 8),
71 TIPC_DUMP_SK_RCVQ = (1 << 9),
72 TIPC_DUMP_SK_BKLGQ = (1 << 10),
73 TIPC_DUMP_ALL = 0xffffu
74};
75#endif
76
77/* Link & Node FSM states: */
78#define state_sym(val) \
79 __print_symbolic(val, \
80 {(0xe), "ESTABLISHED" },\
81 {(0xe << 4), "ESTABLISHING" },\
82 {(0x1 << 8), "RESET" },\
83 {(0x2 << 12), "RESETTING" },\
84 {(0xd << 16), "PEER_RESET" },\
85 {(0xf << 20), "FAILINGOVER" },\
86 {(0xc << 24), "SYNCHING" },\
87 {(0xdd), "SELF_DOWN_PEER_DOWN" },\
88 {(0xaa), "SELF_UP_PEER_UP" },\
89 {(0xd1), "SELF_DOWN_PEER_LEAVING" },\
90 {(0xac), "SELF_UP_PEER_COMING" },\
91 {(0xca), "SELF_COMING_PEER_UP" },\
92 {(0x1d), "SELF_LEAVING_PEER_DOWN" },\
93 {(0xf0), "FAILINGOVER" },\
94 {(0xcc), "SYNCHING" })
95
96/* Link & Node FSM events: */
97#define evt_sym(val) \
98 __print_symbolic(val, \
99 {(0xec1ab1e), "ESTABLISH_EVT" },\
100 {(0x9eed0e), "PEER_RESET_EVT" },\
101 {(0xfa110e), "FAILURE_EVT" },\
102 {(0x10ca1d0e), "RESET_EVT" },\
103 {(0xfa110bee), "FAILOVER_BEGIN_EVT" },\
104 {(0xfa110ede), "FAILOVER_END_EVT" },\
105 {(0xc1ccbee), "SYNCH_BEGIN_EVT" },\
106 {(0xc1ccede), "SYNCH_END_EVT" },\
107 {(0xece), "SELF_ESTABL_CONTACT_EVT" },\
108 {(0x1ce), "SELF_LOST_CONTACT_EVT" },\
109 {(0x9ece), "PEER_ESTABL_CONTACT_EVT" },\
110 {(0x91ce), "PEER_LOST_CONTACT_EVT" },\
111 {(0xfbe), "FAILOVER_BEGIN_EVT" },\
112 {(0xfee), "FAILOVER_END_EVT" },\
113 {(0xcbe), "SYNCH_BEGIN_EVT" },\
114 {(0xcee), "SYNCH_END_EVT" })
115
116/* Bearer, net device events: */
117#define dev_evt_sym(val) \
118 __print_symbolic(val, \
119 {(NETDEV_CHANGE), "NETDEV_CHANGE" },\
120 {(NETDEV_GOING_DOWN), "NETDEV_GOING_DOWN" },\
121 {(NETDEV_UP), "NETDEV_UP" },\
122 {(NETDEV_CHANGEMTU), "NETDEV_CHANGEMTU" },\
123 {(NETDEV_CHANGEADDR), "NETDEV_CHANGEADDR" },\
124 {(NETDEV_UNREGISTER), "NETDEV_UNREGISTER" },\
125 {(NETDEV_CHANGENAME), "NETDEV_CHANGENAME" })
126
127extern unsigned long sysctl_tipc_sk_filter[5] __read_mostly;
128
129int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
130int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf);
131int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
132int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf);
133int tipc_node_dump(struct tipc_node *n, bool more, char *buf);
134bool tipc_sk_filtering(struct sock *sk);
135
136DECLARE_EVENT_CLASS(tipc_skb_class,
137
138 TP_PROTO(struct sk_buff *skb, bool more, const char *header),
139
140 TP_ARGS(skb, more, header),
141
142 TP_STRUCT__entry(
143 __string(header, header)
144 __dynamic_array(char, buf, (more) ? SKB_LMAX : SKB_LMIN)
145 ),
146
147 TP_fast_assign(
148 __assign_str(header, header);
149 tipc_skb_dump(skb, more, __get_str(buf));
150 ),
151
152 TP_printk("%s\n%s", __get_str(header), __get_str(buf))
153)
154
155#define DEFINE_SKB_EVENT(name) \
156DEFINE_EVENT(tipc_skb_class, name, \
157 TP_PROTO(struct sk_buff *skb, bool more, const char *header), \
158 TP_ARGS(skb, more, header))
159DEFINE_SKB_EVENT(tipc_skb_dump);
160DEFINE_SKB_EVENT(tipc_proto_build);
161DEFINE_SKB_EVENT(tipc_proto_rcv);
162
163DECLARE_EVENT_CLASS(tipc_list_class,
164
165 TP_PROTO(struct sk_buff_head *list, bool more, const char *header),
166
167 TP_ARGS(list, more, header),
168
169 TP_STRUCT__entry(
170 __string(header, header)
171 __dynamic_array(char, buf, (more) ? LIST_LMAX : LIST_LMIN)
172 ),
173
174 TP_fast_assign(
175 __assign_str(header, header);
176 tipc_list_dump(list, more, __get_str(buf));
177 ),
178
179 TP_printk("%s\n%s", __get_str(header), __get_str(buf))
180);
181
182#define DEFINE_LIST_EVENT(name) \
183DEFINE_EVENT(tipc_list_class, name, \
184 TP_PROTO(struct sk_buff_head *list, bool more, const char *header), \
185 TP_ARGS(list, more, header))
186DEFINE_LIST_EVENT(tipc_list_dump);
187
188DECLARE_EVENT_CLASS(tipc_sk_class,
189
190 TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
191 const char *header),
192
193 TP_ARGS(sk, skb, dqueues, header),
194
195 TP_STRUCT__entry(
196 __string(header, header)
197 __field(u32, portid)
198 __dynamic_array(char, buf, (dqueues) ? SK_LMAX : SK_LMIN)
199 __dynamic_array(char, skb_buf, (skb) ? SKB_LMIN : 1)
200 ),
201
202 TP_fast_assign(
203 __assign_str(header, header);
204 __entry->portid = tipc_sock_get_portid(sk);
205 tipc_sk_dump(sk, dqueues, __get_str(buf));
206 if (skb)
207 tipc_skb_dump(skb, false, __get_str(skb_buf));
208 else
209 *(__get_str(skb_buf)) = '\0';
210 ),
211
212 TP_printk("<%u> %s\n%s%s", __entry->portid, __get_str(header),
213 __get_str(skb_buf), __get_str(buf))
214);
215
216#define DEFINE_SK_EVENT_FILTER(name) \
217DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
218 TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
219 const char *header), \
220 TP_ARGS(sk, skb, dqueues, header), \
221 TP_CONDITION(tipc_sk_filtering(sk)))
222DEFINE_SK_EVENT_FILTER(tipc_sk_dump);
223DEFINE_SK_EVENT_FILTER(tipc_sk_create);
224DEFINE_SK_EVENT_FILTER(tipc_sk_sendmcast);
225DEFINE_SK_EVENT_FILTER(tipc_sk_sendmsg);
226DEFINE_SK_EVENT_FILTER(tipc_sk_sendstream);
227DEFINE_SK_EVENT_FILTER(tipc_sk_poll);
228DEFINE_SK_EVENT_FILTER(tipc_sk_filter_rcv);
229DEFINE_SK_EVENT_FILTER(tipc_sk_advance_rx);
230DEFINE_SK_EVENT_FILTER(tipc_sk_rej_msg);
231DEFINE_SK_EVENT_FILTER(tipc_sk_drop_msg);
232DEFINE_SK_EVENT_FILTER(tipc_sk_release);
233DEFINE_SK_EVENT_FILTER(tipc_sk_shutdown);
234
235#define DEFINE_SK_EVENT_FILTER_COND(name, cond) \
236DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
237 TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
238 const char *header), \
239 TP_ARGS(sk, skb, dqueues, header), \
240 TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
241DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
242DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
243
244DECLARE_EVENT_CLASS(tipc_link_class,
245
246 TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header),
247
248 TP_ARGS(l, dqueues, header),
249
250 TP_STRUCT__entry(
251 __string(header, header)
252 __array(char, name, TIPC_MAX_LINK_NAME)
253 __dynamic_array(char, buf, (dqueues) ? LINK_LMAX : LINK_LMIN)
254 ),
255
256 TP_fast_assign(
257 __assign_str(header, header);
258 tipc_link_name_ext(l, __entry->name);
259 tipc_link_dump(l, dqueues, __get_str(buf));
260 ),
261
262 TP_printk("<%s> %s\n%s", __entry->name, __get_str(header),
263 __get_str(buf))
264);
265
266#define DEFINE_LINK_EVENT(name) \
267DEFINE_EVENT(tipc_link_class, name, \
268 TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
269 TP_ARGS(l, dqueues, header))
270DEFINE_LINK_EVENT(tipc_link_dump);
271DEFINE_LINK_EVENT(tipc_link_conges);
272DEFINE_LINK_EVENT(tipc_link_timeout);
273DEFINE_LINK_EVENT(tipc_link_reset);
274
275#define DEFINE_LINK_EVENT_COND(name, cond) \
276DEFINE_EVENT_CONDITION(tipc_link_class, name, \
277 TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
278 TP_ARGS(l, dqueues, header), \
279 TP_CONDITION(cond))
280DEFINE_LINK_EVENT_COND(tipc_link_too_silent, tipc_link_too_silent(l));
281
282DECLARE_EVENT_CLASS(tipc_link_transmq_class,
283
284 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
285
286 TP_ARGS(r, f, t, tq),
287
288 TP_STRUCT__entry(
289 __array(char, name, TIPC_MAX_LINK_NAME)
290 __field(u16, from)
291 __field(u16, to)
292 __field(u32, len)
293 __field(u16, fseqno)
294 __field(u16, lseqno)
295 ),
296
297 TP_fast_assign(
298 tipc_link_name_ext(r, __entry->name);
299 __entry->from = f;
300 __entry->to = t;
301 __entry->len = skb_queue_len(tq);
302 __entry->fseqno = msg_seqno(buf_msg(skb_peek(tq)));
303 __entry->lseqno = msg_seqno(buf_msg(skb_peek_tail(tq)));
304 ),
305
306 TP_printk("<%s> retrans req: [%u-%u] transmq: %u [%u-%u]\n",
307 __entry->name, __entry->from, __entry->to,
308 __entry->len, __entry->fseqno, __entry->lseqno)
309);
310
311DEFINE_EVENT(tipc_link_transmq_class, tipc_link_retrans,
312 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
313 TP_ARGS(r, f, t, tq)
314);
315
316DEFINE_EVENT_PRINT(tipc_link_transmq_class, tipc_link_bc_ack,
317 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
318 TP_ARGS(r, f, t, tq),
319 TP_printk("<%s> acked: [%u-%u] transmq: %u [%u-%u]\n",
320 __entry->name, __entry->from, __entry->to,
321 __entry->len, __entry->fseqno, __entry->lseqno)
322);
323
324DECLARE_EVENT_CLASS(tipc_node_class,
325
326 TP_PROTO(struct tipc_node *n, bool more, const char *header),
327
328 TP_ARGS(n, more, header),
329
330 TP_STRUCT__entry(
331 __string(header, header)
332 __field(u32, addr)
333 __dynamic_array(char, buf, (more) ? NODE_LMAX : NODE_LMIN)
334 ),
335
336 TP_fast_assign(
337 __assign_str(header, header);
338 __entry->addr = tipc_node_get_addr(n);
339 tipc_node_dump(n, more, __get_str(buf));
340 ),
341
342 TP_printk("<%x> %s\n%s", __entry->addr, __get_str(header),
343 __get_str(buf))
344);
345
346#define DEFINE_NODE_EVENT(name) \
347DEFINE_EVENT(tipc_node_class, name, \
348 TP_PROTO(struct tipc_node *n, bool more, const char *header), \
349 TP_ARGS(n, more, header))
350DEFINE_NODE_EVENT(tipc_node_dump);
351DEFINE_NODE_EVENT(tipc_node_create);
352DEFINE_NODE_EVENT(tipc_node_delete);
353DEFINE_NODE_EVENT(tipc_node_lost_contact);
354DEFINE_NODE_EVENT(tipc_node_timeout);
355DEFINE_NODE_EVENT(tipc_node_link_up);
356DEFINE_NODE_EVENT(tipc_node_link_down);
357DEFINE_NODE_EVENT(tipc_node_reset_links);
358DEFINE_NODE_EVENT(tipc_node_check_state);
359
360DECLARE_EVENT_CLASS(tipc_fsm_class,
361
362 TP_PROTO(const char *name, u32 os, u32 ns, int evt),
363
364 TP_ARGS(name, os, ns, evt),
365
366 TP_STRUCT__entry(
367 __string(name, name)
368 __field(u32, os)
369 __field(u32, ns)
370 __field(u32, evt)
371 ),
372
373 TP_fast_assign(
374 __assign_str(name, name);
375 __entry->os = os;
376 __entry->ns = ns;
377 __entry->evt = evt;
378 ),
379
380 TP_printk("<%s> %s--(%s)->%s\n", __get_str(name),
381 state_sym(__entry->os), evt_sym(__entry->evt),
382 state_sym(__entry->ns))
383);
384
385#define DEFINE_FSM_EVENT(fsm_name) \
386DEFINE_EVENT(tipc_fsm_class, fsm_name, \
387 TP_PROTO(const char *name, u32 os, u32 ns, int evt), \
388 TP_ARGS(name, os, ns, evt))
389DEFINE_FSM_EVENT(tipc_link_fsm);
390DEFINE_FSM_EVENT(tipc_node_fsm);
391
392TRACE_EVENT(tipc_l2_device_event,
393
394 TP_PROTO(struct net_device *dev, struct tipc_bearer *b,
395 unsigned long evt),
396
397 TP_ARGS(dev, b, evt),
398
399 TP_STRUCT__entry(
400 __string(dev_name, dev->name)
401 __string(b_name, b->name)
402 __field(unsigned long, evt)
403 __field(u8, b_up)
404 __field(u8, carrier)
405 __field(u8, oper)
406 ),
407
408 TP_fast_assign(
409 __assign_str(dev_name, dev->name);
410 __assign_str(b_name, b->name);
411 __entry->evt = evt;
412 __entry->b_up = test_bit(0, &b->up);
413 __entry->carrier = netif_carrier_ok(dev);
414 __entry->oper = netif_oper_up(dev);
415 ),
416
417 TP_printk("%s on: <%s>/<%s> oper: %s carrier: %s bearer: %s\n",
418 dev_evt_sym(__entry->evt), __get_str(dev_name),
419 __get_str(b_name), (__entry->oper) ? "up" : "down",
420 (__entry->carrier) ? "ok" : "notok",
421 (__entry->b_up) ? "up" : "down")
422);
423
424#endif /* _TIPC_TRACE_H */
425
426/* This part must be outside protection */
427#undef TRACE_INCLUDE_PATH
428#define TRACE_INCLUDE_PATH .
429#undef TRACE_INCLUDE_FILE
430#define TRACE_INCLUDE_FILE trace
431#include <trace/define_trace.h>
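
For orientation, a hypothetical call-site sketch (not part of this patch; the exact
arguments are an assumption): each DEFINE_EVENT()/DEFINE_EVENT_CONDITION() above
generates a trace_<name>() wrapper that TIPC code can invoke, and the resulting
events can then be switched on through the usual tracefs controls (e.g.
events/tipc/tipc_link_fsm/enable, assuming the trace system is named "tipc").

	/* Hypothetical call sites, shown only to illustrate the generated API: */
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	trace_tipc_node_lost_contact(n, true, " ");
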
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101bc4a9..4d85d71f16e2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
245 } 245 }
246 246
247 err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr); 247 err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
248 if (err) { 248 if (err)
249 kfree_skb(_skb);
250 goto out; 249 goto out;
251 }
252 } 250 }
253 err = 0; 251 err = 0;
254out: 252out:
@@ -650,6 +648,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
650 struct udp_tunnel_sock_cfg tuncfg = {NULL}; 648 struct udp_tunnel_sock_cfg tuncfg = {NULL};
651 struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; 649 struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
652 u8 node_id[NODE_ID_LEN] = {0,}; 650 u8 node_id[NODE_ID_LEN] = {0,};
651 int rmcast = 0;
653 652
654 ub = kzalloc(sizeof(*ub), GFP_ATOMIC); 653 ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
655 if (!ub) 654 if (!ub)
@@ -680,6 +679,14 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
680 if (err) 679 if (err)
681 goto err; 680 goto err;
682 681
682 if (remote.proto != local.proto) {
683 err = -EINVAL;
684 goto err;
685 }
686
 687	/* Check whether the remote address is a multicast address */
688 rmcast = tipc_udp_is_mcast_addr(&remote);
689
683 /* Autoconfigure own node identity if needed */ 690 /* Autoconfigure own node identity if needed */
684 if (!tipc_own_id(net)) { 691 if (!tipc_own_id(net)) {
685 memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16); 692 memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
@@ -705,7 +712,12 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
705 goto err; 712 goto err;
706 } 713 }
707 udp_conf.family = AF_INET; 714 udp_conf.family = AF_INET;
708 udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 715
 716	/* Bind to ANY so multicast group traffic is received */
717 if (rmcast)
718 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
719 else
720 udp_conf.local_ip.s_addr = local.ipv4.s_addr;
709 udp_conf.use_udp_checksums = false; 721 udp_conf.use_udp_checksums = false;
710 ub->ifindex = dev->ifindex; 722 ub->ifindex = dev->ifindex;
711 if (tipc_mtu_bad(dev, sizeof(struct iphdr) + 723 if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
@@ -719,7 +731,10 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
719 udp_conf.family = AF_INET6; 731 udp_conf.family = AF_INET6;
720 udp_conf.use_udp6_tx_checksums = true; 732 udp_conf.use_udp6_tx_checksums = true;
721 udp_conf.use_udp6_rx_checksums = true; 733 udp_conf.use_udp6_rx_checksums = true;
722 udp_conf.local_ip6 = in6addr_any; 734 if (rmcast)
735 udp_conf.local_ip6 = in6addr_any;
736 else
737 udp_conf.local_ip6 = local.ipv6;
723 b->mtu = 1280; 738 b->mtu = 1280;
724#endif 739#endif
725 } else { 740 } else {
@@ -741,7 +756,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
741 * is used if it's a multicast address. 756 * is used if it's a multicast address.
742 */ 757 */
743 memcpy(&b->bcast_addr.value, &remote, sizeof(remote)); 758 memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
744 if (tipc_udp_is_mcast_addr(&remote)) 759 if (rmcast)
745 err = enable_mcast(ub, &remote); 760 err = enable_mcast(ub, &remote);
746 else 761 else
747 err = tipc_udp_rcast_add(b, &remote); 762 err = tipc_udp_rcast_add(b, &remote);
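
The rmcast flag computed above drives both decisions in this hunk: when the configured
remote address is a multicast group, the bearer binds its tunnel socket to ANY and joins
the group via enable_mcast(); otherwise it binds to the configured local address and
uses replicast via tipc_udp_rcast_add(). A minimal userspace sketch of the underlying
address test (illustrative only; 228.0.23.250 is just an example group address, not
taken from this patch):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* IPv4 multicast addresses fall in 224.0.0.0/4. */
	static bool is_ipv4_mcast(const char *addr)
	{
		struct in_addr in;

		if (inet_pton(AF_INET, addr, &in) != 1)
			return false;
		return IN_MULTICAST(ntohl(in.s_addr));
	}

	int main(void)
	{
		const char *addrs[] = { "228.0.23.250", "10.0.0.2" };

		for (int i = 0; i < 2; i++)
			printf("%-14s -> %s\n", addrs[i],
			       is_ipv4_mcast(addrs[i]) ?
			       "multicast: bind ANY, enable_mcast()" :
			       "unicast: bind local addr, rcast");
		return 0;
	}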