aboutsummaryrefslogtreecommitdiffstats
path: root/net/smc
diff options
context:
space:
mode:
authorUrsula Braun <ursula.braun@de.ibm.com>2018-06-28 13:05:06 -0400
committerDavid S. Miller <davem@davemloft.net>2018-06-30 07:42:25 -0400
commite82f2e31f5597a3de44bd27b7427f577f637c552 (patch)
treec9926ddd6e37961804a2ee7e9966ddeb5281d4e7 /net/smc
parent0afff91c6f5ecef27715ea71e34dc2baacba1060 (diff)
net/smc: optimize consumer cursor updates
The SMC protocol requires sending a separate consumer cursor update if it cannot be piggybacked onto updates of the producer cursor. Currently the decision to send a separate consumer cursor update considers only the amount of data already received by the socket program. It does not consider the amount of data already arrived, but not yet consumed by the receiver. Basing the decision on the difference between already confirmed and already arrived data (instead of the difference between already confirmed and already consumed data) may lead to a somewhat earlier consumer cursor update being sent in fast unidirectional traffic scenarios, and thus to better throughput. Signed-off-by: Ursula Braun <ubraun@linux.ibm.com> Suggested-by: Thomas Richter <tmricht@linux.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/smc')
-rw-r--r--net/smc/smc_tx.c12
1 file changed, 10 insertions, 2 deletions
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index cee666400752..f82886b7d1d8 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -495,7 +495,8 @@ out:
 
 void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 {
-	union smc_host_cursor cfed, cons;
+	union smc_host_cursor cfed, cons, prod;
+	int sender_free = conn->rmb_desc->len;
 	int to_confirm;
 
 	smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 	       smc_curs_read(&conn->rx_curs_confirmed, conn),
 	       conn);
 	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+	if (to_confirm > conn->rmbe_update_limit) {
+		smc_curs_write(&prod,
+			       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+			       conn);
+		sender_free = conn->rmb_desc->len -
+			      smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+	}
 
 	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
 	    force ||
 	    ((to_confirm > conn->rmbe_update_limit) &&
-	     ((to_confirm > (conn->rmb_desc->len / 2)) ||
+	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
 	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
 		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
 		    conn->alert_token_local) { /* connection healthy */