path: root/net/rds
author		Eric Dumazet <edumazet@google.com>	2016-05-17 20:44:08 -0400
committer	David S. Miller <davem@davemloft.net>	2016-05-19 14:36:49 -0400
commit		38036629cded6b96a9f9689758a88d067c4d4d44 (patch)
tree		419a1d0d02014f0fc416939a93792d1c4aee24f6 /net/rds
parent		e1daca289a36965d923ec26647b5668e023eb0ac (diff)
rds: tcp: block BH in TCP callbacks
The TCP stack can now run from process context.

Use the read_lock_bh(&sk->sk_callback_lock) variant to restore the
previous assumption that these callbacks run with BH disabled.

Fixes: 5413d1babe8f ("net: do not block BH while processing socket backlog")
Fixes: d41a69f1d390 ("tcp: make tcp_sendmsg() aware of socket backlog")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
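As context for the change, here is a minimal sketch of the callback pattern the patch adjusts; the function name and body are hypothetical and not taken from net/rds. RDS installs its own sk_data_ready/sk_state_change/sk_write_space handlers and dereferences sk->sk_user_data under sk->sk_callback_lock, so once these callbacks can be invoked from process context as well as softirq context, the _bh locking variant keeps BH disabled as before.

#include <net/sock.h>

/* Illustrative sketch only: example_data_ready and its body are
 * hypothetical; they mirror the pattern of the patched callbacks
 * but are not part of the patch.
 */
static void example_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	/* These callbacks used to run only from softirq context.  Since the
	 * TCP backlog may now be processed from process context (see the
	 * Fixes: commits above), the _bh variant keeps BH disabled here and
	 * preserves the previous locking assumption.
	 */
	read_lock_bh(&sk->sk_callback_lock);
	if (!sk->sk_user_data) {		/* check for teardown race */
		ready = sk->sk_data_ready;
		read_unlock_bh(&sk->sk_callback_lock);
		ready(sk);			/* fall back to the stock handler */
		return;
	}

	/* ... hand the incoming data off to the upper layer here ... */

	read_unlock_bh(&sk->sk_callback_lock);
}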
Diffstat (limited to 'net/rds')
-rw-r--r--	net/rds/tcp_connect.c	4
-rw-r--r--	net/rds/tcp_listen.c	4
-rw-r--r--	net/rds/tcp_recv.c	4
-rw-r--r--	net/rds/tcp_send.c	4
4 files changed, 8 insertions, 8 deletions
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 49a3fcfed360..fb82e0a0bf89 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		state_change = sk->sk_state_change;
@@ -69,7 +69,7 @@ void rds_tcp_state_change(struct sock *sk)
 		break;
 	}
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	state_change(sk);
 }
 
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index be263cdf268b..3fa367945105 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -166,7 +166,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 
 	rdsdebug("listen data ready sk %p\n", sk);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
 	if (!ready) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -183,7 +183,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 		rds_tcp_accept_work(sk);
 
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
 }
 
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index d75d8b56a9e3..c3196f9d070a 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -301,7 +301,7 @@ void rds_tcp_data_ready(struct sock *sk)
 
 	rdsdebug("data ready sk %p\n", sk);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -315,7 +315,7 @@ void rds_tcp_data_ready(struct sock *sk)
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
 }
 
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 2894e6095e3b..22d0f2020a79 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -180,7 +180,7 @@ void rds_tcp_write_space(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		write_space = sk->sk_write_space;
@@ -200,7 +200,7 @@ void rds_tcp_write_space(struct sock *sk)
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 
 	/*
 	 * write_space is only called when data leaves tcp's send queue if