author     David S. Miller <davem@davemloft.net>    2015-04-08 15:17:38 -0400
committer  David S. Miller <davem@davemloft.net>    2015-04-08 15:17:38 -0400
commit     1ec1e23d1d43ce96fe9a3b96ffbf3fbb702a8db7 (patch)
tree       74cfc543b82a49fd7b920aade205cdca1ecb586b
parent     0ad2a8365975d6794d79a4e4dde60fcc036692c7 (diff)
parent     443be0e5affe3acb6dd81e7402951677e0a0eb35 (diff)
Merge branch 'rds'
Sowmini Varadhan says:

====================
RDS: RDS-core fixes

This patch-series updates the RDS core and rds-tcp modules with some bug
fixes that were originally authored by Andy Grover, Zach Brown, and Chris
Mason.

v2: Code review comment by Sergei Shtylyov

v3: DaveM comments:
    - dropped patches 3, 5 for "heuristic" changes in rds_send_xmit().
      Investigation into the root-cause of these IB-triggered changes
      produced the feedback: "I don't remember seeing "RDS: Stuck RM"
      message in last 1-1.5 years and checking with other folks. It may
      very well be some old workaround for stale connection for which long
      term fix is already made and this part of code not exercised
      anymore." Any such fixes, *if* they are needed, can/should be done in
      the IB-specific RDS transport modules.
    - similarly dropped the LL_SEND_FULL patch (patch 6 in v2 set)

v4: Documentation/networking/rds.txt contains incorrect references to
    "missing sysctl values for pf_rds and sol_rds in mainline". The sysctl
    values were never needed in mainline, thus fix the documentation.

v5: Clarify comment per http://www.spinics.net/lists/netdev/msg324220.html

v6: Re-added entire version history to cover letter.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   Documentation/networking/rds.txt   9
-rw-r--r--   net/rds/connection.c               3
-rw-r--r--   net/rds/rds.h                      1
-rw-r--r--   net/rds/send.c                     33
4 files changed, 38 insertions, 8 deletions
diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
index c67077cbeb80..e1a3d59bbe0f 100644
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -62,11 +62,10 @@ Socket Interface
 ================
 
   AF_RDS, PF_RDS, SOL_RDS
-        These constants haven't been assigned yet, because RDS isn't in
-        mainline yet. Currently, the kernel module assigns some constant
-        and publishes it to user space through two sysctl files
-                /proc/sys/net/rds/pf_rds
-                /proc/sys/net/rds/sol_rds
+        AF_RDS and PF_RDS are the domain type to be used with socket(2)
+        to create RDS sockets. SOL_RDS is the socket-level to be used
+        with setsockopt(2) and getsockopt(2) for RDS specific socket
+        options.
 
   fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
         This creates a new, unbound RDS socket.
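As a reference for the interface the updated documentation describes, here is a
minimal user-space sketch (not part of this patch): it creates a PF_RDS socket,
binds it to a local IPv4 address, and notes where SOL_RDS options would go. It
assumes AF_RDS/PF_RDS and SOL_RDS are visible via the system headers; the
fallback numeric values mirror include/linux/socket.h, and the address and port
are placeholders.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef AF_RDS
#define AF_RDS  21      /* value used by include/linux/socket.h */
#define PF_RDS  AF_RDS
#endif
#ifndef SOL_RDS
#define SOL_RDS 276     /* value used by include/linux/socket.h */
#endif

int main(void)
{
        struct sockaddr_in sin;
        int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

        if (fd < 0) {
                perror("socket(PF_RDS)");
                return 1;
        }

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;                       /* RDS binds to an IPv4 address */
        sin.sin_addr.s_addr = inet_addr("192.0.2.10");  /* placeholder address */
        sin.sin_port = htons(18634);                    /* placeholder port */

        if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
                perror("bind");
                close(fd);
                return 1;
        }

        /*
         * RDS-specific options (e.g. RDS_RECVERR from <linux/rds.h>) would
         * be set here with setsockopt(fd, SOL_RDS, ...).
         */
        close(fd);
        return 0;
}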
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 378c3a6acf84..14f041398ca1 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -130,7 +130,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
 	rcu_read_lock();
 	conn = rds_conn_lookup(head, laddr, faddr, trans);
 	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
-	    !is_outgoing) {
+	    laddr == faddr && !is_outgoing) {
 		/* This is a looped back IB connection, and we're
 		 * called by the code handling the incoming connect.
 		 * We need a second connection object into which we
@@ -193,6 +193,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
 	}
 
 	atomic_set(&conn->c_state, RDS_CONN_DOWN);
+	conn->c_send_gen = 0;
 	conn->c_reconnect_jiffies = 0;
 	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
 	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c2a5eef41343..02d8fd5b40c0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -110,6 +110,7 @@ struct rds_connection {
 	void			*c_transport_data;
 
 	atomic_t		c_state;
+	unsigned long		c_send_gen;
 	unsigned long		c_flags;
 	unsigned long		c_reconnect_jiffies;
 	struct delayed_work	c_send_w;
diff --git a/net/rds/send.c b/net/rds/send.c
index 42f65d4305c8..49f77efd82b9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -140,8 +140,11 @@ int rds_send_xmit(struct rds_connection *conn)
 	struct scatterlist *sg;
 	int ret = 0;
 	LIST_HEAD(to_be_dropped);
+	int batch_count;
+	unsigned long send_gen = 0;
 
 restart:
+	batch_count = 0;
 
 	/*
 	 * sendmsg calls here after having queued its message on the send
@@ -157,6 +160,17 @@ restart:
 	}
 
 	/*
+	 * we record the send generation after doing the xmit acquire.
+	 * if someone else manages to jump in and do some work, we'll use
+	 * this to avoid a goto restart farther down.
+	 *
+	 * The acquire_in_xmit() check above ensures that only one
+	 * caller can increment c_send_gen at any time.
+	 */
+	conn->c_send_gen++;
+	send_gen = conn->c_send_gen;
+
+	/*
 	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 	 * we do the opposite to avoid races.
 	 */
@@ -202,6 +216,16 @@ restart:
 	if (!rm) {
 		unsigned int len;
 
+		batch_count++;
+
+		/* we want to process as big a batch as we can, but
+		 * we also want to avoid softlockups.  If we've been
+		 * through a lot of messages, lets back off and see
+		 * if anyone else jumps in
+		 */
+		if (batch_count >= 1024)
+			goto over_batch;
+
 		spin_lock_irqsave(&conn->c_lock, flags);
 
 		if (!list_empty(&conn->c_send_queue)) {
@@ -357,9 +381,9 @@ restart:
 		}
 	}
 
+over_batch:
 	if (conn->c_trans->xmit_complete)
 		conn->c_trans->xmit_complete(conn);
-
 	release_in_xmit(conn);
 
 	/* Nuke any messages we decided not to retransmit. */
@@ -380,10 +404,15 @@ restart:
 	 * If the transport cannot continue (i.e ret != 0), then it must
 	 * call us when more room is available, such as from the tx
 	 * completion handler.
+	 *
+	 * We have an extra generation check here so that if someone manages
+	 * to jump in after our release_in_xmit, we'll see that they have done
+	 * some work and we will skip our goto
 	 */
 	if (ret == 0) {
 		smp_mb();
-		if (!list_empty(&conn->c_send_queue)) {
+		if (!list_empty(&conn->c_send_queue) &&
+		    send_gen == conn->c_send_gen) {
 			rds_stats_inc(s_send_lock_queue_raced);
 			goto restart;
 		}
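Taken together, the send.c hunks add two guards: a batch cap so a single call
to rds_send_xmit() cannot hog the CPU, and a per-connection generation counter
so the caller that releases RDS_IN_XMIT only loops back when no newer send pass
has started in the meantime. The following standalone user-space sketch shows
that pattern with C11 atomics; the struct and function names here are
illustrative, not the kernel's, and a real instance would initialize in_xmit
with ATOMIC_FLAG_INIT before use.

#include <stdatomic.h>
#include <stdbool.h>

struct fake_conn {
        atomic_flag in_xmit;                    /* plays the role of RDS_IN_XMIT */
        _Atomic unsigned long send_gen;         /* plays the role of c_send_gen */
        _Atomic int queued;                     /* stand-in for the send queue depth */
};

/* "Transmit" one queued message; returns false once the queue is empty. */
static bool xmit_one(struct fake_conn *c)
{
        int n = atomic_load(&c->queued);

        while (n > 0) {
                if (atomic_compare_exchange_weak(&c->queued, &n, n - 1))
                        return true;
        }
        return false;
}

void fake_send_xmit(struct fake_conn *c)
{
        unsigned long send_gen;
        int batch_count;

restart:
        batch_count = 0;

        /* Only one caller pumps the queue at a time. */
        if (atomic_flag_test_and_set(&c->in_xmit))
                return;

        /* Record the generation after winning the flag; only the flag
         * holder ever increments it, mirroring the comment in the patch. */
        send_gen = atomic_fetch_add(&c->send_gen, 1) + 1;

        while (xmit_one(c)) {
                /* Cap the batch so one pass cannot run unboundedly. */
                if (++batch_count >= 1024)
                        break;
        }

        atomic_flag_clear(&c->in_xmit);

        /* If work remains and no newer pass started since we recorded
         * send_gen, the restart is on us; otherwise the newer pass owns it. */
        if (atomic_load(&c->queued) != 0 &&
            send_gen == atomic_load(&c->send_gen))
                goto restart;
}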