diff options
author | Joe Perches <joe@perches.com> | 2009-11-29 19:55:45 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-11-29 19:55:45 -0500 |
commit | f64f9e719261a87818dd192a3a2352e5b20fbd0f (patch) | |
tree | b2d5cbaef3df615295f6061d8c4d6a912690556c /net/rds | |
parent | 152b6a62aea2d43359dd37004e9c218bf7bdeb3b (diff) |
net: Move && and || to end of previous line
Not including net/atm/
Compile tested x86 allyesconfig only
Added a > 80 column line or two, which I ignored.
Existing checkpatch plaints willfully, cheerfully ignored.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rds')
-rw-r--r-- | net/rds/af_rds.c | 8 | ||||
-rw-r--r-- | net/rds/connection.c | 6 | ||||
-rw-r--r-- | net/rds/ib_cm.c | 4 | ||||
-rw-r--r-- | net/rds/ib_rdma.c | 4 | ||||
-rw-r--r-- | net/rds/ib_recv.c | 12 | ||||
-rw-r--r-- | net/rds/ib_send.c | 4 | ||||
-rw-r--r-- | net/rds/iw_rdma.c | 4 | ||||
-rw-r--r-- | net/rds/iw_recv.c | 12 | ||||
-rw-r--r-- | net/rds/iw_send.c | 7 | ||||
-rw-r--r-- | net/rds/message.c | 3 | ||||
-rw-r--r-- | net/rds/rdma.c | 12 | ||||
-rw-r--r-- | net/rds/recv.c | 11 | ||||
-rw-r--r-- | net/rds/send.c | 27 | ||||
-rw-r--r-- | net/rds/threads.c | 4 |
14 files changed, 56 insertions, 62 deletions
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index e25d8d5ce8df..853c52be781f 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -174,8 +174,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
174 | mask |= (POLLIN | POLLRDNORM); | 174 | mask |= (POLLIN | POLLRDNORM); |
175 | spin_unlock(&rs->rs_lock); | 175 | spin_unlock(&rs->rs_lock); |
176 | } | 176 | } |
177 | if (!list_empty(&rs->rs_recv_queue) | 177 | if (!list_empty(&rs->rs_recv_queue) || |
178 | || !list_empty(&rs->rs_notify_queue)) | 178 | !list_empty(&rs->rs_notify_queue)) |
179 | mask |= (POLLIN | POLLRDNORM); | 179 | mask |= (POLLIN | POLLRDNORM); |
180 | if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) | 180 | if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) |
181 | mask |= (POLLOUT | POLLWRNORM); | 181 | mask |= (POLLOUT | POLLWRNORM); |
@@ -308,8 +308,8 @@ static int rds_getsockopt(struct socket *sock, int level, int optname, | |||
308 | if (len < sizeof(int)) | 308 | if (len < sizeof(int)) |
309 | ret = -EINVAL; | 309 | ret = -EINVAL; |
310 | else | 310 | else |
311 | if (put_user(rs->rs_recverr, (int __user *) optval) | 311 | if (put_user(rs->rs_recverr, (int __user *) optval) || |
312 | || put_user(sizeof(int), optlen)) | 312 | put_user(sizeof(int), optlen)) |
313 | ret = -EFAULT; | 313 | ret = -EFAULT; |
314 | else | 314 | else |
315 | ret = 0; | 315 | ret = 0; |
diff --git a/net/rds/connection.c b/net/rds/connection.c index cc8b568c0c84..278f607ab603 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -133,10 +133,8 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, | |||
133 | 133 | ||
134 | spin_lock_irqsave(&rds_conn_lock, flags); | 134 | spin_lock_irqsave(&rds_conn_lock, flags); |
135 | conn = rds_conn_lookup(head, laddr, faddr, trans); | 135 | conn = rds_conn_lookup(head, laddr, faddr, trans); |
136 | if (conn | 136 | if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && |
137 | && conn->c_loopback | 137 | !is_outgoing) { |
138 | && conn->c_trans != &rds_loop_transport | ||
139 | && !is_outgoing) { | ||
140 | /* This is a looped back IB connection, and we're | 138 | /* This is a looped back IB connection, and we're |
141 | * called by the code handling the incoming connect. | 139 | * called by the code handling the incoming connect. |
142 | * We need a second connection object into which we | 140 | * We need a second connection object into which we |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 9d320692a4fc..647cb8ffc39b 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -377,8 +377,8 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event) | |||
377 | } | 377 | } |
378 | 378 | ||
379 | /* Even if len is crap *now* I still want to check it. -ASG */ | 379 | /* Even if len is crap *now* I still want to check it. -ASG */ |
380 | if (event->param.conn.private_data_len < sizeof (*dp) | 380 | if (event->param.conn.private_data_len < sizeof (*dp) || |
381 | || dp->dp_protocol_major == 0) | 381 | dp->dp_protocol_major == 0) |
382 | return RDS_PROTOCOL_3_0; | 382 | return RDS_PROTOCOL_3_0; |
383 | 383 | ||
384 | common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; | 384 | common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index c5e916598c14..4b0da865a72c 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -570,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate) | |||
570 | spin_unlock_irqrestore(&pool->list_lock, flags); | 570 | spin_unlock_irqrestore(&pool->list_lock, flags); |
571 | 571 | ||
572 | /* If we've pinned too many pages, request a flush */ | 572 | /* If we've pinned too many pages, request a flush */ |
573 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned | 573 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || |
574 | || atomic_read(&pool->dirty_count) >= pool->max_items / 10) | 574 | atomic_read(&pool->dirty_count) >= pool->max_items / 10) |
575 | queue_work(rds_wq, &pool->flush_worker); | 575 | queue_work(rds_wq, &pool->flush_worker); |
576 | 576 | ||
577 | if (invalidate) { | 577 | if (invalidate) { |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index fe5ab8c6b964..04dc0d3f3c95 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -230,8 +230,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, | |||
230 | int ret = 0; | 230 | int ret = 0; |
231 | u32 pos; | 231 | u32 pos; |
232 | 232 | ||
233 | while ((prefill || rds_conn_up(conn)) | 233 | while ((prefill || rds_conn_up(conn)) && |
234 | && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { | 234 | rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { |
235 | if (pos >= ic->i_recv_ring.w_nr) { | 235 | if (pos >= ic->i_recv_ring.w_nr) { |
236 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", | 236 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", |
237 | pos); | 237 | pos); |
@@ -771,10 +771,10 @@ static void rds_ib_process_recv(struct rds_connection *conn, | |||
771 | hdr = &ibinc->ii_inc.i_hdr; | 771 | hdr = &ibinc->ii_inc.i_hdr; |
772 | /* We can't just use memcmp here; fragments of a | 772 | /* We can't just use memcmp here; fragments of a |
773 | * single message may carry different ACKs */ | 773 | * single message may carry different ACKs */ |
774 | if (hdr->h_sequence != ihdr->h_sequence | 774 | if (hdr->h_sequence != ihdr->h_sequence || |
775 | || hdr->h_len != ihdr->h_len | 775 | hdr->h_len != ihdr->h_len || |
776 | || hdr->h_sport != ihdr->h_sport | 776 | hdr->h_sport != ihdr->h_sport || |
777 | || hdr->h_dport != ihdr->h_dport) { | 777 | hdr->h_dport != ihdr->h_dport) { |
778 | rds_ib_conn_error(conn, | 778 | rds_ib_conn_error(conn, |
779 | "fragment header mismatch; forcing reconnect\n"); | 779 | "fragment header mismatch; forcing reconnect\n"); |
780 | return; | 780 | return; |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 23bf830db2d5..a10fab6886d1 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -252,8 +252,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
252 | 252 | ||
253 | rds_ib_ring_free(&ic->i_send_ring, completed); | 253 | rds_ib_ring_free(&ic->i_send_ring, completed); |
254 | 254 | ||
255 | if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) | 255 | if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || |
256 | || test_bit(0, &conn->c_map_queued)) | 256 | test_bit(0, &conn->c_map_queued)) |
257 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 257 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
258 | 258 | ||
259 | /* We expect errors as the qp is drained during shutdown */ | 259 | /* We expect errors as the qp is drained during shutdown */ |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index b25d785e49fc..9eda11cca956 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -573,8 +573,8 @@ void rds_iw_free_mr(void *trans_private, int invalidate) | |||
573 | rds_iw_free_fastreg(pool, ibmr); | 573 | rds_iw_free_fastreg(pool, ibmr); |
574 | 574 | ||
575 | /* If we've pinned too many pages, request a flush */ | 575 | /* If we've pinned too many pages, request a flush */ |
576 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned | 576 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || |
577 | || atomic_read(&pool->dirty_count) >= pool->max_items / 10) | 577 | atomic_read(&pool->dirty_count) >= pool->max_items / 10) |
578 | queue_work(rds_wq, &pool->flush_worker); | 578 | queue_work(rds_wq, &pool->flush_worker); |
579 | 579 | ||
580 | if (invalidate) { | 580 | if (invalidate) { |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 24fc53f03833..54af7d6b92da 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -230,8 +230,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, | |||
230 | int ret = 0; | 230 | int ret = 0; |
231 | u32 pos; | 231 | u32 pos; |
232 | 232 | ||
233 | while ((prefill || rds_conn_up(conn)) | 233 | while ((prefill || rds_conn_up(conn)) && |
234 | && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) { | 234 | rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) { |
235 | if (pos >= ic->i_recv_ring.w_nr) { | 235 | if (pos >= ic->i_recv_ring.w_nr) { |
236 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", | 236 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", |
237 | pos); | 237 | pos); |
@@ -730,10 +730,10 @@ static void rds_iw_process_recv(struct rds_connection *conn, | |||
730 | hdr = &iwinc->ii_inc.i_hdr; | 730 | hdr = &iwinc->ii_inc.i_hdr; |
731 | /* We can't just use memcmp here; fragments of a | 731 | /* We can't just use memcmp here; fragments of a |
732 | * single message may carry different ACKs */ | 732 | * single message may carry different ACKs */ |
733 | if (hdr->h_sequence != ihdr->h_sequence | 733 | if (hdr->h_sequence != ihdr->h_sequence || |
734 | || hdr->h_len != ihdr->h_len | 734 | hdr->h_len != ihdr->h_len || |
735 | || hdr->h_sport != ihdr->h_sport | 735 | hdr->h_sport != ihdr->h_sport || |
736 | || hdr->h_dport != ihdr->h_dport) { | 736 | hdr->h_dport != ihdr->h_dport) { |
737 | rds_iw_conn_error(conn, | 737 | rds_iw_conn_error(conn, |
738 | "fragment header mismatch; forcing reconnect\n"); | 738 | "fragment header mismatch; forcing reconnect\n"); |
739 | return; | 739 | return; |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 1f5abe3cf2b4..1379e9d66a78 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -288,8 +288,8 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
288 | 288 | ||
289 | rds_iw_ring_free(&ic->i_send_ring, completed); | 289 | rds_iw_ring_free(&ic->i_send_ring, completed); |
290 | 290 | ||
291 | if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) | 291 | if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || |
292 | || test_bit(0, &conn->c_map_queued)) | 292 | test_bit(0, &conn->c_map_queued)) |
293 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 293 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
294 | 294 | ||
295 | /* We expect errors as the qp is drained during shutdown */ | 295 | /* We expect errors as the qp is drained during shutdown */ |
@@ -519,8 +519,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
519 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); | 519 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); |
520 | 520 | ||
521 | /* Fastreg support */ | 521 | /* Fastreg support */ |
522 | if (rds_rdma_cookie_key(rm->m_rdma_cookie) | 522 | if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) { |
523 | && !ic->i_fastreg_posted) { | ||
524 | ret = -EAGAIN; | 523 | ret = -EAGAIN; |
525 | goto out; | 524 | goto out; |
526 | } | 525 | } |
diff --git a/net/rds/message.c b/net/rds/message.c index ca50a8ec9742..73e600ffd87f 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -122,8 +122,7 @@ int rds_message_add_extension(struct rds_header *hdr, | |||
122 | if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE) | 122 | if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE) |
123 | return 0; | 123 | return 0; |
124 | 124 | ||
125 | if (type >= __RDS_EXTHDR_MAX | 125 | if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type]) |
126 | || len != rds_exthdr_size[type]) | ||
127 | return 0; | 126 | return 0; |
128 | 127 | ||
129 | if (ext_len >= RDS_HEADER_EXT_SPACE) | 128 | if (ext_len >= RDS_HEADER_EXT_SPACE) |
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 971b5a668458..4c64daa1f5d5 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -631,8 +631,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
631 | { | 631 | { |
632 | struct rds_rdma_op *op; | 632 | struct rds_rdma_op *op; |
633 | 633 | ||
634 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) | 634 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || |
635 | || rm->m_rdma_op != NULL) | 635 | rm->m_rdma_op != NULL) |
636 | return -EINVAL; | 636 | return -EINVAL; |
637 | 637 | ||
638 | op = rds_rdma_prepare(rs, CMSG_DATA(cmsg)); | 638 | op = rds_rdma_prepare(rs, CMSG_DATA(cmsg)); |
@@ -655,8 +655,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, | |||
655 | u32 r_key; | 655 | u32 r_key; |
656 | int err = 0; | 656 | int err = 0; |
657 | 657 | ||
658 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) | 658 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) || |
659 | || rm->m_rdma_cookie != 0) | 659 | rm->m_rdma_cookie != 0) |
660 | return -EINVAL; | 660 | return -EINVAL; |
661 | 661 | ||
662 | memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); | 662 | memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); |
@@ -692,8 +692,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, | |||
692 | int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, | 692 | int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, |
693 | struct cmsghdr *cmsg) | 693 | struct cmsghdr *cmsg) |
694 | { | 694 | { |
695 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) | 695 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) || |
696 | || rm->m_rdma_cookie != 0) | 696 | rm->m_rdma_cookie != 0) |
697 | return -EINVAL; | 697 | return -EINVAL; |
698 | 698 | ||
699 | return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr); | 699 | return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr); |
diff --git a/net/rds/recv.c b/net/rds/recv.c index fdff33c7b432..b426d67f760c 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
@@ -195,8 +195,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, | |||
195 | * XXX we could spend more on the wire to get more robust failure | 195 | * XXX we could spend more on the wire to get more robust failure |
196 | * detection, arguably worth it to avoid data corruption. | 196 | * detection, arguably worth it to avoid data corruption. |
197 | */ | 197 | */ |
198 | if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq | 198 | if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq && |
199 | && (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { | 199 | (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { |
200 | rds_stats_inc(s_recv_drop_old_seq); | 200 | rds_stats_inc(s_recv_drop_old_seq); |
201 | goto out; | 201 | goto out; |
202 | } | 202 | } |
@@ -432,10 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
432 | } | 432 | } |
433 | 433 | ||
434 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 434 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, |
435 | (!list_empty(&rs->rs_notify_queue) | 435 | (!list_empty(&rs->rs_notify_queue) || |
436 | || rs->rs_cong_notify | 436 | rs->rs_cong_notify || |
437 | || rds_next_incoming(rs, &inc)), | 437 | rds_next_incoming(rs, &inc)), timeo); |
438 | timeo); | ||
439 | rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, | 438 | rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, |
440 | timeo); | 439 | timeo); |
441 | if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) | 440 | if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) |
diff --git a/net/rds/send.c b/net/rds/send.c index 28c88ff3d038..b2fccfc20769 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -235,8 +235,8 @@ int rds_send_xmit(struct rds_connection *conn) | |||
235 | * connection. | 235 | * connection. |
236 | * Therefore, we never retransmit messages with RDMA ops. | 236 | * Therefore, we never retransmit messages with RDMA ops. |
237 | */ | 237 | */ |
238 | if (rm->m_rdma_op | 238 | if (rm->m_rdma_op && |
239 | && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { | 239 | test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { |
240 | spin_lock_irqsave(&conn->c_lock, flags); | 240 | spin_lock_irqsave(&conn->c_lock, flags); |
241 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) | 241 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) |
242 | list_move(&rm->m_conn_item, &to_be_dropped); | 242 | list_move(&rm->m_conn_item, &to_be_dropped); |
@@ -247,8 +247,8 @@ int rds_send_xmit(struct rds_connection *conn) | |||
247 | 247 | ||
248 | /* Require an ACK every once in a while */ | 248 | /* Require an ACK every once in a while */ |
249 | len = ntohl(rm->m_inc.i_hdr.h_len); | 249 | len = ntohl(rm->m_inc.i_hdr.h_len); |
250 | if (conn->c_unacked_packets == 0 | 250 | if (conn->c_unacked_packets == 0 || |
251 | || conn->c_unacked_bytes < len) { | 251 | conn->c_unacked_bytes < len) { |
252 | __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); | 252 | __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); |
253 | 253 | ||
254 | conn->c_unacked_packets = rds_sysctl_max_unacked_packets; | 254 | conn->c_unacked_packets = rds_sysctl_max_unacked_packets; |
@@ -418,8 +418,8 @@ void rds_rdma_send_complete(struct rds_message *rm, int status) | |||
418 | spin_lock(&rm->m_rs_lock); | 418 | spin_lock(&rm->m_rs_lock); |
419 | 419 | ||
420 | ro = rm->m_rdma_op; | 420 | ro = rm->m_rdma_op; |
421 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) | 421 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && |
422 | && ro && ro->r_notify && ro->r_notifier) { | 422 | ro && ro->r_notify && ro->r_notifier) { |
423 | notifier = ro->r_notifier; | 423 | notifier = ro->r_notifier; |
424 | rs = rm->m_rs; | 424 | rs = rm->m_rs; |
425 | sock_hold(rds_rs_to_sk(rs)); | 425 | sock_hold(rds_rs_to_sk(rs)); |
@@ -549,8 +549,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
549 | list_del_init(&rm->m_sock_item); | 549 | list_del_init(&rm->m_sock_item); |
550 | rds_send_sndbuf_remove(rs, rm); | 550 | rds_send_sndbuf_remove(rs, rm); |
551 | 551 | ||
552 | if (ro && ro->r_notifier | 552 | if (ro && ro->r_notifier && (status || ro->r_notify)) { |
553 | && (status || ro->r_notify)) { | ||
554 | notifier = ro->r_notifier; | 553 | notifier = ro->r_notifier; |
555 | list_add_tail(¬ifier->n_list, | 554 | list_add_tail(¬ifier->n_list, |
556 | &rs->rs_notify_queue); | 555 | &rs->rs_notify_queue); |
@@ -877,8 +876,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
877 | if (ret) | 876 | if (ret) |
878 | goto out; | 877 | goto out; |
879 | 878 | ||
880 | if ((rm->m_rdma_cookie || rm->m_rdma_op) | 879 | if ((rm->m_rdma_cookie || rm->m_rdma_op) && |
881 | && conn->c_trans->xmit_rdma == NULL) { | 880 | conn->c_trans->xmit_rdma == NULL) { |
882 | if (printk_ratelimit()) | 881 | if (printk_ratelimit()) |
883 | printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", | 882 | printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", |
884 | rm->m_rdma_op, conn->c_trans->xmit_rdma); | 883 | rm->m_rdma_op, conn->c_trans->xmit_rdma); |
@@ -890,8 +889,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
890 | * have scheduled a delayed reconnect however - in this case | 889 | * have scheduled a delayed reconnect however - in this case |
891 | * we should not interfere. | 890 | * we should not interfere. |
892 | */ | 891 | */ |
893 | if (rds_conn_state(conn) == RDS_CONN_DOWN | 892 | if (rds_conn_state(conn) == RDS_CONN_DOWN && |
894 | && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) | 893 | !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) |
895 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 894 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
896 | 895 | ||
897 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 896 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
@@ -973,8 +972,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) | |||
973 | * have scheduled a delayed reconnect however - in this case | 972 | * have scheduled a delayed reconnect however - in this case |
974 | * we should not interfere. | 973 | * we should not interfere. |
975 | */ | 974 | */ |
976 | if (rds_conn_state(conn) == RDS_CONN_DOWN | 975 | if (rds_conn_state(conn) == RDS_CONN_DOWN && |
977 | && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) | 976 | !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) |
978 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 977 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
979 | 978 | ||
980 | ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); | 979 | ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); |
diff --git a/net/rds/threads.c b/net/rds/threads.c index dd7e0cad1e7c..00fa10e59af8 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -170,8 +170,8 @@ void rds_shutdown_worker(struct work_struct *work) | |||
170 | * handler is supposed to check for state DISCONNECTING | 170 | * handler is supposed to check for state DISCONNECTING |
171 | */ | 171 | */ |
172 | mutex_lock(&conn->c_cm_lock); | 172 | mutex_lock(&conn->c_cm_lock); |
173 | if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) | 173 | if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) && |
174 | && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) { | 174 | !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) { |
175 | rds_conn_error(conn, "shutdown called in state %d\n", | 175 | rds_conn_error(conn, "shutdown called in state %d\n", |
176 | atomic_read(&conn->c_state)); | 176 | atomic_read(&conn->c_state)); |
177 | mutex_unlock(&conn->c_cm_lock); | 177 | mutex_unlock(&conn->c_cm_lock); |