diff options
author | Jon Paul Maloy <jon.maloy@ericsson.com> | 2015-07-30 18:24:21 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-07-30 20:25:14 -0400 |
commit | 662921cd0a53db4504838dfbb7d996f9e6e94001 (patch) | |
tree | 22093f4240391972cd6bcf879cbdfe963a73bdb1 /net/tipc/node.c | |
parent | 5045f7b9009f1455268b98cecbcc271663934c85 (diff) |
tipc: merge link->exec_mode and link->state into one FSM
Until now, we have been handling link failover and synchronization
by using an additional link state variable, "exec_mode". This variable
is not independent of the link FSM state, which creates a risk of
inconsistencies, in addition to cluttering the code.
The conditions are now in place to define a new link FSM that covers
all existing use cases, including failover and synchronization, and
eliminate the "exec_mode" field altogether. The FSM must also support
non-atomic resetting of links, which will be introduced later.
The new link FSM is shown below, with 7 states and 8 events.
Only events leading to state change are shown as edges.
+------------------------------------+
|RESET_EVT |
| |
| +--------------+
| +-----------------| SYNCHING |-----------------+
| |FAILURE_EVT +--------------+ PEER_RESET_EVT|
| | A | |
| | | | |
| | | | |
| | |SYNCH_ |SYNCH_ |
| | |BEGIN_EVT |END_EVT |
| | | | |
| V | V V
| +-------------+ +--------------+ +------------+
| | RESETTING |<---------| ESTABLISHED |--------->| PEER_RESET |
| +-------------+ FAILURE_ +--------------+ PEER_ +------------+
| | EVT | A RESET_EVT |
| | | | |
| | | | |
| | +--------------+ | |
| RESET_EVT| |RESET_EVT |ESTABLISH_EVT |
| | | | |
| | | | |
| V V | |
| +-------------+ +--------------+ RESET_EVT|
+--->| RESET |--------->| ESTABLISHING |<----------------+
+-------------+ PEER_ +--------------+
| A RESET_EVT |
| | |
| | |
|FAILOVER_ |FAILOVER_ |FAILOVER_
|BEGIN_EVT |END_EVT |BEGIN_EVT
| | |
V | |
+-------------+ |
| FAILINGOVER |<----------------+
+-------------+
These changes are fully backwards compatible.
Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r-- | net/tipc/node.c | 31 |
1 files changed, 18 insertions, 13 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c index 9e20acffb3d4..a3ceeda2a80a 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -334,7 +334,6 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id, | |||
334 | if (!ol) { | 334 | if (!ol) { |
335 | *slot0 = bearer_id; | 335 | *slot0 = bearer_id; |
336 | *slot1 = bearer_id; | 336 | *slot1 = bearer_id; |
337 | nl->exec_mode = TIPC_LINK_OPEN; | ||
338 | tipc_link_build_bcast_sync_msg(nl, xmitq); | 337 | tipc_link_build_bcast_sync_msg(nl, xmitq); |
339 | node_established_contact(n); | 338 | node_established_contact(n); |
340 | return; | 339 | return; |
@@ -368,7 +367,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id) | |||
368 | struct sk_buff_head xmitq; | 367 | struct sk_buff_head xmitq; |
369 | 368 | ||
370 | l = n->links[bearer_id].link; | 369 | l = n->links[bearer_id].link; |
371 | if (!l || !tipc_link_is_up(l)) | 370 | if (!l || tipc_link_is_reset(l)) |
372 | return; | 371 | return; |
373 | 372 | ||
374 | __skb_queue_head_init(&xmitq); | 373 | __skb_queue_head_init(&xmitq); |
@@ -414,6 +413,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id) | |||
414 | n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1); | 413 | n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1); |
415 | tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq); | 414 | tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq); |
416 | tipc_link_reset(l); | 415 | tipc_link_reset(l); |
416 | tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); | ||
417 | tipc_bearer_xmit(n->net, tnl->bearer_id, &xmitq, maddr); | 417 | tipc_bearer_xmit(n->net, tnl->bearer_id, &xmitq, maddr); |
418 | } | 418 | } |
419 | 419 | ||
@@ -749,7 +749,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
749 | struct tipc_link *l_ptr = n_ptr->links[i].link; | 749 | struct tipc_link *l_ptr = n_ptr->links[i].link; |
750 | if (!l_ptr) | 750 | if (!l_ptr) |
751 | continue; | 751 | continue; |
752 | l_ptr->exec_mode = TIPC_LINK_OPEN; | 752 | tipc_link_fsm_evt(l_ptr, LINK_FAILOVER_END_EVT); |
753 | kfree_skb(l_ptr->failover_reasm_skb); | 753 | kfree_skb(l_ptr->failover_reasm_skb); |
754 | l_ptr->failover_reasm_skb = NULL; | 754 | l_ptr->failover_reasm_skb = NULL; |
755 | tipc_link_reset_fragments(l_ptr); | 755 | tipc_link_reset_fragments(l_ptr); |
@@ -989,7 +989,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, | |||
989 | * Returns true if state is ok, otherwise consumes buffer and returns false | 989 | * Returns true if state is ok, otherwise consumes buffer and returns false |
990 | */ | 990 | */ |
991 | static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, | 991 | static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, |
992 | int bearer_id) | 992 | int bearer_id, struct sk_buff_head *xmitq) |
993 | { | 993 | { |
994 | struct tipc_msg *hdr = buf_msg(skb); | 994 | struct tipc_msg *hdr = buf_msg(skb); |
995 | int usr = msg_user(hdr); | 995 | int usr = msg_user(hdr); |
@@ -1042,42 +1042,47 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, | |||
1042 | /* Initiate or update failover mode if applicable */ | 1042 | /* Initiate or update failover mode if applicable */ |
1043 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) { | 1043 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) { |
1044 | syncpt = oseqno + exp_pkts - 1; | 1044 | syncpt = oseqno + exp_pkts - 1; |
1045 | if (pl && tipc_link_is_up(pl)) { | 1045 | if (pl && tipc_link_is_up(pl)) |
1046 | tipc_node_link_down(n, pl->bearer_id); | 1046 | tipc_node_link_down(n, pl->bearer_id); |
1047 | pl->exec_mode = TIPC_LINK_BLOCKED; | 1047 | |
1048 | } | ||
1049 | /* If pkts arrive out of order, use lowest calculated syncpt */ | 1048 | /* If pkts arrive out of order, use lowest calculated syncpt */ |
1050 | if (less(syncpt, n->sync_point)) | 1049 | if (less(syncpt, n->sync_point)) |
1051 | n->sync_point = syncpt; | 1050 | n->sync_point = syncpt; |
1052 | } | 1051 | } |
1053 | 1052 | ||
1054 | /* Open parallel link when tunnel link reaches synch point */ | 1053 | /* Open parallel link when tunnel link reaches synch point */ |
1055 | if ((n->state == NODE_FAILINGOVER) && (more(rcv_nxt, n->sync_point))) { | 1054 | if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) { |
1055 | if (!more(rcv_nxt, n->sync_point)) | ||
1056 | return true; | ||
1056 | tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT); | 1057 | tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT); |
1057 | if (pl) | 1058 | if (pl) |
1058 | pl->exec_mode = TIPC_LINK_OPEN; | 1059 | tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT); |
1059 | return true; | 1060 | return true; |
1060 | } | 1061 | } |
1061 | 1062 | ||
1062 | /* Initiate or update synch mode if applicable */ | 1063 | /* Initiate or update synch mode if applicable */ |
1063 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) { | 1064 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) { |
1064 | syncpt = iseqno + exp_pkts - 1; | 1065 | syncpt = iseqno + exp_pkts - 1; |
1066 | if (!tipc_link_is_up(l)) { | ||
1067 | tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); | ||
1068 | tipc_node_link_up(n, bearer_id, xmitq); | ||
1069 | } | ||
1065 | if (n->state == SELF_UP_PEER_UP) { | 1070 | if (n->state == SELF_UP_PEER_UP) { |
1066 | n->sync_point = syncpt; | 1071 | n->sync_point = syncpt; |
1072 | tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT); | ||
1067 | tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); | 1073 | tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); |
1068 | } | 1074 | } |
1069 | l->exec_mode = TIPC_LINK_TUNNEL; | ||
1070 | if (less(syncpt, n->sync_point)) | 1075 | if (less(syncpt, n->sync_point)) |
1071 | n->sync_point = syncpt; | 1076 | n->sync_point = syncpt; |
1072 | } | 1077 | } |
1073 | 1078 | ||
1074 | /* Open tunnel link when parallel link reaches synch point */ | 1079 | /* Open tunnel link when parallel link reaches synch point */ |
1075 | if ((n->state == NODE_SYNCHING) && (l->exec_mode == TIPC_LINK_TUNNEL)) { | 1080 | if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) { |
1076 | if (pl) | 1081 | if (pl) |
1077 | dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq)); | 1082 | dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq)); |
1078 | if (!pl || more(dlv_nxt, n->sync_point)) { | 1083 | if (!pl || more(dlv_nxt, n->sync_point)) { |
1084 | tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT); | ||
1079 | tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); | 1085 | tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); |
1080 | l->exec_mode = TIPC_LINK_OPEN; | ||
1081 | return true; | 1086 | return true; |
1082 | } | 1087 | } |
1083 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) | 1088 | if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) |
@@ -1143,7 +1148,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) | |||
1143 | tipc_bclink_acknowledge(n, msg_bcast_ack(hdr)); | 1148 | tipc_bclink_acknowledge(n, msg_bcast_ack(hdr)); |
1144 | 1149 | ||
1145 | /* Check and if necessary update node state */ | 1150 | /* Check and if necessary update node state */ |
1146 | if (likely(tipc_node_check_state(n, skb, bearer_id))) { | 1151 | if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) { |
1147 | rc = tipc_link_rcv(le->link, skb, &xmitq); | 1152 | rc = tipc_link_rcv(le->link, skb, &xmitq); |
1148 | skb = NULL; | 1153 | skb = NULL; |
1149 | } | 1154 | } |