Diffstat (limited to 'net/rds/tcp_recv.c')

 net/rds/tcp_recv.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c00dafffbb5a..e43797404102 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -31,6 +31,7 @@
  *
  */
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <net/tcp.h>
 
 #include "rds.h"
@@ -97,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 				goto out;
 			}
 
+			rds_stats_add(s_copy_to_user, to_copy);
 			size -= to_copy;
 			ret += to_copy;
 			skb_off += to_copy;
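
Note on the hunk above: rds_stats_add() charges the number of bytes copied to
userspace against the RDS per-CPU statistics. The macro itself lives in
net/rds/rds.h; the sketch below only illustrates the general shape of such a
per-CPU counter helper, with the field types and macro details assumed rather
than copied from that header.

/* Illustrative sketch of a per-CPU stats helper in the spirit of
 * rds_stats_add(); names and details are assumed, not taken verbatim
 * from net/rds/rds.h.
 */
#include <linux/types.h>
#include <linux/percpu.h>

struct rds_statistics {
	uint64_t	s_copy_to_user;
	/* ... other counters ... */
};

DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);

#define rds_stats_add(member, count)				\
do {								\
	per_cpu(rds_stats, get_cpu()).member += (count);	\
	put_cpu();						\
} while (0)
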
@@ -322,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
 	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (conn == NULL) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -336,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
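Note on the last two hunks: the data-ready callback now takes
sk->sk_callback_lock with the _bh variants. This matches the common pattern
where the same lock is also taken from process context (for example when the
transport installs or removes its socket callbacks) under write_lock_bh(), so
readers disable bottom halves as well to avoid a deadlock with softirq
processing on the same CPU; that rationale is an assumption about the
surrounding RDS/TCP code, not something visible in this hunk. A minimal
sketch of such a write side follows (hypothetical function name, not the RDS
code itself):

/* Hypothetical example of the matching write side: publishing
 * sk_user_data and the data_ready callback under write_lock_bh(),
 * which is what makes the _bh read variants above necessary.
 */
#include <net/sock.h>

static void example_set_callbacks(struct socket *sock, void *priv,
				  void (*data_ready)(struct sock *, int))
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = priv;
	sk->sk_data_ready = data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}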