author	Jon Paul Maloy <jon.maloy@ericsson.com>	2015-07-16 16:54:26 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-20 23:41:15 -0400
commit	426cc2b86d1813959497d608dcb52c32df2d448a (patch)
tree	d7c7bc6edede6adf04456337e497add82c679c86 /net/tipc
parent	d3504c3449fead545e5254bfb11da916f72c4734 (diff)
tipc: introduce new link protocol msg create function
As a preparation for later changes, we introduce a new function
tipc_link_build_proto_msg(). Instead of actually sending the created
protocol message, it only creates it and adds it to the head of an
skb queue provided by the caller. Since we still need the existing
function tipc_link_proto_xmit() for a while, we redesign it to make
use of the new function.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
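The point of the new helper is that message creation is decoupled from transmission: the caller owns the queue, so later patches can collect one or more protocol messages and flush them to the bearer in one place. The following is a minimal sketch of that call pattern, assuming the usual net/tipc/link.c context; only the functions visible in the diff below are taken from the patch, and the helper names link_proto_flush() and link_proto_example() are hypothetical.

/* Illustrative sketch (not part of the patch): build a protocol
 * message into a caller-owned queue, then flush that queue to the
 * bearer. The helper names are hypothetical; the called functions
 * are the ones used in the diff below.
 */
static void link_proto_flush(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;

	/* Transmit every buffer the build step queued up */
	while ((skb = __skb_dequeue(xmitq))) {
		tipc_bearer_send(l->owner->net, l->bearer_id, skb,
				 &l->media_addr);
		l->rcv_unacked = 0;
		kfree_skb(skb);
	}
}

static void link_proto_example(struct tipc_link *l)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	/* Creation is decoupled from transmission: the message is only
	 * queued on xmitq, exactly as the redesigned
	 * tipc_link_proto_xmit() does in this patch.
	 */
	tipc_link_build_proto_msg(l, STATE_MSG, false, 0,
				  l->tolerance, l->priority, &xmitq);
	link_proto_flush(l, &xmitq);
}

Queueing the message at the head of xmitq rather than the tail keeps protocol messages ahead of anything already sitting in the caller's queue, which is presumably why the commit message stresses "the head of an skb queue".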
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/link.c	| 144
1 file changed, 77 insertions(+), 67 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 35a2da688db1..657ba91fde41 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -129,6 +129,9 @@ static void tipc_link_proto_rcv(struct tipc_link *link,
 				struct sk_buff *skb);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
 static void tipc_link_sync_xmit(struct tipc_link *l);
@@ -1323,77 +1326,21 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 /*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
 			  u32 gap, u32 tolerance, u32 priority)
 {
-	struct sk_buff *buf = NULL;
-	struct tipc_msg *msg = l_ptr->pmsg;
-	u32 msg_size = sizeof(l_ptr->proto_msg);
-	int r_flag;
-	u16 last_rcv;
+	struct sk_buff *skb = NULL;
+	struct sk_buff_head xmitq;
 
-	/* Don't send protocol message during link failover */
-	if (l_ptr->exec_mode == TIPC_LINK_BLOCKED)
-		return;
-
-	/* Abort non-RESET send if communication with node is prohibited */
-	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
-		return;
-
-	/* Create protocol message with "out-of-sequence" sequence number */
-	msg_set_type(msg, msg_typ);
-	msg_set_net_plane(msg, l_ptr->net_plane);
-	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
-	if (msg_typ == STATE_MSG) {
-		u16 next_sent = l_ptr->snd_nxt;
-
-		if (!tipc_link_is_up(l_ptr))
-			return;
-		msg_set_next_sent(msg, next_sent);
-		if (!skb_queue_empty(&l_ptr->deferdq)) {
-			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
-			gap = mod(last_rcv - l_ptr->rcv_nxt);
-		}
-		msg_set_seq_gap(msg, gap);
-		if (gap)
-			l_ptr->stats.sent_nacks++;
-		msg_set_link_tolerance(msg, tolerance);
-		msg_set_linkprio(msg, priority);
-		msg_set_max_pkt(msg, l_ptr->mtu);
-		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-		msg_set_probe(msg, probe_msg != 0);
-		if (probe_msg)
-			l_ptr->stats.sent_probes++;
-		l_ptr->stats.sent_states++;
-	} else {		/* RESET_MSG or ACTIVATE_MSG */
-		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
-		msg_set_seq_gap(msg, 0);
-		msg_set_next_sent(msg, 1);
-		msg_set_probe(msg, 0);
-		msg_set_link_tolerance(msg, l_ptr->tolerance);
-		msg_set_linkprio(msg, l_ptr->priority);
-		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
-	}
-
-	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
-	msg_set_redundant_link(msg, r_flag);
-	msg_set_linkprio(msg, l_ptr->priority);
-	msg_set_size(msg, msg_size);
-
-	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
-	buf = tipc_buf_acquire(msg_size);
-	if (!buf)
+	__skb_queue_head_init(&xmitq);
+	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+				  tolerance, priority, &xmitq);
+	skb = __skb_dequeue(&xmitq);
+	if (!skb)
 		return;
-
-	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
-	buf->priority = TC_PRIO_CONTROL;
-	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
-			 &l_ptr->media_addr);
-	l_ptr->rcv_unacked = 0;
-	kfree_skb(buf);
+	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
+	l->rcv_unacked = 0;
+	kfree_skb(skb);
 }
 
 /*
@@ -1514,6 +1461,69 @@ exit:
 	kfree_skb(buf);
 }
 
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
+ */
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb = NULL;
+	struct tipc_msg *hdr = l->pmsg;
+	u16 snd_nxt = l->snd_nxt;
+	u16 rcv_nxt = l->rcv_nxt;
+	u16 rcv_last = rcv_nxt - 1;
+	int node_up = l->owner->bclink.recv_permitted;
+
+	/* Don't send protocol message during reset or link failover */
+	if (l->exec_mode == TIPC_LINK_BLOCKED)
+		return;
+
+	/* Abort non-RESET send if communication with node is prohibited */
+	if ((tipc_node_blocked(l->owner)) && (mtyp != RESET_MSG))
+		return;
+
+	msg_set_type(hdr, mtyp);
+	msg_set_net_plane(hdr, l->net_plane);
+	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+	msg_set_link_tolerance(hdr, tolerance);
+	msg_set_linkprio(hdr, priority);
+	msg_set_redundant_link(hdr, node_up);
+	msg_set_seq_gap(hdr, 0);
+
+	/* Compatibility: created msg must not be in sequence with pkt flow */
+	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+	if (mtyp == STATE_MSG) {
+		if (!tipc_link_is_up(l))
+			return;
+		msg_set_next_sent(hdr, snd_nxt);
+
+		/* Override rcvgap if there are packets in deferred queue */
+		if (!skb_queue_empty(&l->deferdq))
+			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+		if (rcvgap) {
+			msg_set_seq_gap(hdr, rcvgap);
+			l->stats.sent_nacks++;
+		}
+		msg_set_ack(hdr, rcv_last);
+		msg_set_probe(hdr, probe);
+		if (probe)
+			l->stats.sent_probes++;
+		l->stats.sent_states++;
+	} else {
+		/* RESET_MSG or ACTIVATE_MSG */
+		msg_set_max_pkt(hdr, l->advertised_mtu);
+		msg_set_ack(hdr, l->failover_checkpt - 1);
+		msg_set_next_sent(hdr, 1);
+	}
+	skb = tipc_buf_acquire(msg_size(hdr));
+	if (!skb)
+		return;
+	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+	skb->priority = TC_PRIO_CONTROL;
+	__skb_queue_head(xmitq, skb);
+}
 
 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
  * a different bearer. Owner node is locked.