diff options
Diffstat (limited to 'net/rds/send.c')
-rw-r--r-- | net/rds/send.c | 28 |
1 files changed, 14 insertions, 14 deletions
diff --git a/net/rds/send.c b/net/rds/send.c
index 28c88ff3d038..f04b929ded92 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/gfp.h> | ||
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
35 | #include <linux/in.h> | 36 | #include <linux/in.h> |
36 | #include <linux/list.h> | 37 | #include <linux/list.h> |
@@ -235,8 +236,8 @@ int rds_send_xmit(struct rds_connection *conn) | |||
235 | * connection. | 236 | * connection. |
236 | * Therefore, we never retransmit messages with RDMA ops. | 237 | * Therefore, we never retransmit messages with RDMA ops. |
237 | */ | 238 | */ |
238 | if (rm->m_rdma_op | 239 | if (rm->m_rdma_op && |
239 | && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { | 240 | test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { |
240 | spin_lock_irqsave(&conn->c_lock, flags); | 241 | spin_lock_irqsave(&conn->c_lock, flags); |
241 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) | 242 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) |
242 | list_move(&rm->m_conn_item, &to_be_dropped); | 243 | list_move(&rm->m_conn_item, &to_be_dropped); |
@@ -247,8 +248,8 @@ int rds_send_xmit(struct rds_connection *conn) | |||
247 | 248 | ||
248 | /* Require an ACK every once in a while */ | 249 | /* Require an ACK every once in a while */ |
249 | len = ntohl(rm->m_inc.i_hdr.h_len); | 250 | len = ntohl(rm->m_inc.i_hdr.h_len); |
250 | if (conn->c_unacked_packets == 0 | 251 | if (conn->c_unacked_packets == 0 || |
251 | || conn->c_unacked_bytes < len) { | 252 | conn->c_unacked_bytes < len) { |
252 | __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); | 253 | __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); |
253 | 254 | ||
254 | conn->c_unacked_packets = rds_sysctl_max_unacked_packets; | 255 | conn->c_unacked_packets = rds_sysctl_max_unacked_packets; |
@@ -418,8 +419,8 @@ void rds_rdma_send_complete(struct rds_message *rm, int status) | |||
418 | spin_lock(&rm->m_rs_lock); | 419 | spin_lock(&rm->m_rs_lock); |
419 | 420 | ||
420 | ro = rm->m_rdma_op; | 421 | ro = rm->m_rdma_op; |
421 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) | 422 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && |
422 | && ro && ro->r_notify && ro->r_notifier) { | 423 | ro && ro->r_notify && ro->r_notifier) { |
423 | notifier = ro->r_notifier; | 424 | notifier = ro->r_notifier; |
424 | rs = rm->m_rs; | 425 | rs = rm->m_rs; |
425 | sock_hold(rds_rs_to_sk(rs)); | 426 | sock_hold(rds_rs_to_sk(rs)); |
@@ -549,8 +550,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
549 | list_del_init(&rm->m_sock_item); | 550 | list_del_init(&rm->m_sock_item); |
550 | rds_send_sndbuf_remove(rs, rm); | 551 | rds_send_sndbuf_remove(rs, rm); |
551 | 552 | ||
552 | if (ro && ro->r_notifier | 553 | if (ro && ro->r_notifier && (status || ro->r_notify)) { |
553 | && (status || ro->r_notify)) { | ||
554 | notifier = ro->r_notifier; | 554 | notifier = ro->r_notifier; |
555 | list_add_tail(¬ifier->n_list, | 555 | list_add_tail(¬ifier->n_list, |
556 | &rs->rs_notify_queue); | 556 | &rs->rs_notify_queue); |
@@ -877,8 +877,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
877 | if (ret) | 877 | if (ret) |
878 | goto out; | 878 | goto out; |
879 | 879 | ||
880 | if ((rm->m_rdma_cookie || rm->m_rdma_op) | 880 | if ((rm->m_rdma_cookie || rm->m_rdma_op) && |
881 | && conn->c_trans->xmit_rdma == NULL) { | 881 | conn->c_trans->xmit_rdma == NULL) { |
882 | if (printk_ratelimit()) | 882 | if (printk_ratelimit()) |
883 | printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", | 883 | printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", |
884 | rm->m_rdma_op, conn->c_trans->xmit_rdma); | 884 | rm->m_rdma_op, conn->c_trans->xmit_rdma); |
@@ -890,8 +890,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
890 | * have scheduled a delayed reconnect however - in this case | 890 | * have scheduled a delayed reconnect however - in this case |
891 | * we should not interfere. | 891 | * we should not interfere. |
892 | */ | 892 | */ |
893 | if (rds_conn_state(conn) == RDS_CONN_DOWN | 893 | if (rds_conn_state(conn) == RDS_CONN_DOWN && |
894 | && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) | 894 | !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) |
895 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 895 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
896 | 896 | ||
897 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 897 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
@@ -973,8 +973,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) | |||
973 | * have scheduled a delayed reconnect however - in this case | 973 | * have scheduled a delayed reconnect however - in this case |
974 | * we should not interfere. | 974 | * we should not interfere. |
975 | */ | 975 | */ |
976 | if (rds_conn_state(conn) == RDS_CONN_DOWN | 976 | if (rds_conn_state(conn) == RDS_CONN_DOWN && |
977 | && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) | 977 | !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) |
978 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 978 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
979 | 979 | ||
980 | ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); | 980 | ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); |