aboutsummaryrefslogtreecommitdiffstats
path: root/net/rds
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-20 23:01:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-20 23:01:26 -0400
commit087afe8aaf562dc7a53f2577049830d6a3245742 (patch)
tree94fe422e62965b24030019368cb9ec4f9c90cd38 /net/rds
parent54cf809b9512be95f53ed4a5e3b631d1ac42f0fa (diff)
parent95829b3a9c0b1d88778b23bc2afdf5a83de066ff (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes and more updates from David Miller: 1) Tunneling fixes from Tom Herbert and Alexander Duyck. 2) AF_UNIX updates some struct sock bit fields with the socket lock, whereas setsockopt() sets overlapping ones with locking. Separate out the synchronized vs. the AF_UNIX unsynchronized ones to avoid corruption. From Andrey Ryabinin. 3) Mount BPF filesystem with mount_nodev rather than mount_ns, from Eric Biederman. 4) A couple kmemdup conversions, from Muhammad Falak R Wani. 5) BPF verifier fixes from Alexei Starovoitov. 6) Don't let tunneled UDP packets get stuck in socket queues, if something goes wrong during the encapsulation just drop the packet rather than signalling an error up the call stack. From Hannes Frederic Sowa. 7) SKB ref after free in batman-adv, from Florian Westphal. 8) TCP iSCSI, ocfs2, rds, and tipc have to disable BH in their TCP callbacks since the TCP stack runs pre-emptibly now. From Eric Dumazet. 9) Fix crash in fixed_phy_add, from Rabin Vincent. 10) Fix length checks in xen-netback, from Paul Durrant. 11) Fix mixup in KEY vs KEYID macsec attributes, from Sabrina Dubroca. 
12) RDS connection spamming bug fixes from Sowmini Varadhan * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (152 commits) net: suppress warnings on dev_alloc_skb uapi glibc compat: fix compilation when !__USE_MISC in glibc udp: prevent skbs lingering in tunnel socket queues bpf: teach verifier to recognize imm += ptr pattern bpf: support decreasing order in direct packet access net: usb: ch9200: use kmemdup ps3_gelic: use kmemdup net:liquidio: use kmemdup bpf: Use mount_nodev not mount_ns to mount the bpf filesystem net: cdc_ncm: update datagram size after changing mtu tuntap: correctly wake up process during uninit intel: Add support for IPv6 IP-in-IP offload ip6_gre: Do not allow segmentation offloads GRE_CSUM is enabled with FOU/GUE RDS: TCP: Avoid rds connection churn from rogue SYNs RDS: TCP: rds_tcp_accept_worker() must exit gracefully when terminating rds-tcp net: sock: move ->sk_shutdown out of bitfields. ipv6: Don't reset inner headers in ip6_tnl_xmit ip4ip6: Support for GSO/GRO ip6ip6: Support for GSO/GRO ipv6: Set features for IPv6 tunnels ...
Diffstat (limited to 'net/rds')
-rw-r--r--net/rds/tcp_connect.c4
-rw-r--r--net/rds/tcp_listen.c17
-rw-r--r--net/rds/tcp_recv.c4
-rw-r--r--net/rds/tcp_send.c4
4 files changed, 17 insertions, 12 deletions
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 49a3fcfed360..fb82e0a0bf89 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
43 struct rds_connection *conn; 43 struct rds_connection *conn;
44 struct rds_tcp_connection *tc; 44 struct rds_tcp_connection *tc;
45 45
46 read_lock(&sk->sk_callback_lock); 46 read_lock_bh(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (!conn) { 48 if (!conn) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
@@ -69,7 +69,7 @@ void rds_tcp_state_change(struct sock *sk)
69 break; 69 break;
70 } 70 }
71out: 71out:
72 read_unlock(&sk->sk_callback_lock); 72 read_unlock_bh(&sk->sk_callback_lock);
73 state_change(sk); 73 state_change(sk);
74} 74}
75 75
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index be263cdf268b..4bf4befe5066 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -80,6 +80,9 @@ int rds_tcp_accept_one(struct socket *sock)
80 int conn_state; 80 int conn_state;
81 struct sock *nsk; 81 struct sock *nsk;
82 82
83 if (!sock) /* module unload or netns delete in progress */
84 return -ENETUNREACH;
85
83 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, 86 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
84 sock->sk->sk_type, sock->sk->sk_protocol, 87 sock->sk->sk_type, sock->sk->sk_protocol,
85 &new_sock); 88 &new_sock);
@@ -129,11 +132,13 @@ int rds_tcp_accept_one(struct socket *sock)
129 * so we must quiesce any send threads before resetting 132 * so we must quiesce any send threads before resetting
130 * c_transport_data. 133 * c_transport_data.
131 */ 134 */
132 wait_event(conn->c_waitq, 135 if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) ||
133 !test_bit(RDS_IN_XMIT, &conn->c_flags)); 136 !conn->c_outgoing) {
134 if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
135 goto rst_nsk; 137 goto rst_nsk;
136 } else if (rs_tcp->t_sock) { 138 } else {
139 atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
140 wait_event(conn->c_waitq,
141 !test_bit(RDS_IN_XMIT, &conn->c_flags));
137 rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); 142 rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
138 conn->c_outgoing = 0; 143 conn->c_outgoing = 0;
139 } 144 }
@@ -166,7 +171,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
166 171
167 rdsdebug("listen data ready sk %p\n", sk); 172 rdsdebug("listen data ready sk %p\n", sk);
168 173
169 read_lock(&sk->sk_callback_lock); 174 read_lock_bh(&sk->sk_callback_lock);
170 ready = sk->sk_user_data; 175 ready = sk->sk_user_data;
171 if (!ready) { /* check for teardown race */ 176 if (!ready) { /* check for teardown race */
172 ready = sk->sk_data_ready; 177 ready = sk->sk_data_ready;
@@ -183,7 +188,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
183 rds_tcp_accept_work(sk); 188 rds_tcp_accept_work(sk);
184 189
185out: 190out:
186 read_unlock(&sk->sk_callback_lock); 191 read_unlock_bh(&sk->sk_callback_lock);
187 ready(sk); 192 ready(sk);
188} 193}
189 194
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index d75d8b56a9e3..c3196f9d070a 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -301,7 +301,7 @@ void rds_tcp_data_ready(struct sock *sk)
301 301
302 rdsdebug("data ready sk %p\n", sk); 302 rdsdebug("data ready sk %p\n", sk);
303 303
304 read_lock(&sk->sk_callback_lock); 304 read_lock_bh(&sk->sk_callback_lock);
305 conn = sk->sk_user_data; 305 conn = sk->sk_user_data;
306 if (!conn) { /* check for teardown race */ 306 if (!conn) { /* check for teardown race */
307 ready = sk->sk_data_ready; 307 ready = sk->sk_data_ready;
@@ -315,7 +315,7 @@ void rds_tcp_data_ready(struct sock *sk)
315 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM) 315 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
316 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 316 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
317out: 317out:
318 read_unlock(&sk->sk_callback_lock); 318 read_unlock_bh(&sk->sk_callback_lock);
319 ready(sk); 319 ready(sk);
320} 320}
321 321
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 2894e6095e3b..22d0f2020a79 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -180,7 +180,7 @@ void rds_tcp_write_space(struct sock *sk)
180 struct rds_connection *conn; 180 struct rds_connection *conn;
181 struct rds_tcp_connection *tc; 181 struct rds_tcp_connection *tc;
182 182
183 read_lock(&sk->sk_callback_lock); 183 read_lock_bh(&sk->sk_callback_lock);
184 conn = sk->sk_user_data; 184 conn = sk->sk_user_data;
185 if (!conn) { 185 if (!conn) {
186 write_space = sk->sk_write_space; 186 write_space = sk->sk_write_space;
@@ -200,7 +200,7 @@ void rds_tcp_write_space(struct sock *sk)
200 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 200 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
201 201
202out: 202out:
203 read_unlock(&sk->sk_callback_lock); 203 read_unlock_bh(&sk->sk_callback_lock);
204 204
205 /* 205 /*
206 * write_space is only called when data leaves tcp's send queue if 206 * write_space is only called when data leaves tcp's send queue if