about | summary | refs | log | tree | commit | diff | stats
path: root/net/sunrpc/xprtrdma
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2014-07-09 14:49:15 -0400
committerJ. Bruce Fields <bfields@redhat.com>2014-07-11 15:03:48 -0400
commit255942907e7ff498ab1545b5edce5690833ff640 (patch)
treea10154fa4de9d81f2bfcf0020b1d8c3c5b48e690 /net/sunrpc/xprtrdma
parenta46cb7f2878d22b5df190970416cea40982ec2fb (diff)
svcrdma: send_write() must not overflow the device's max sge
Function send_write() must stop creating sges when it reaches the device max and return the amount sent in the RDMA Write to the caller. Signed-off-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 39
1 file changed, 15 insertions(+), 24 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 49fd21a5c215..9f1b50689c0f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -192,6 +192,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
192 xdr_sge_no++; 192 xdr_sge_no++;
193 BUG_ON(xdr_sge_no > vec->count); 193 BUG_ON(xdr_sge_no > vec->count);
194 bc -= sge_bytes; 194 bc -= sge_bytes;
195 if (sge_no == xprt->sc_max_sge)
196 break;
195 } 197 }
196 198
197 /* Prepare WRITE WR */ 199 /* Prepare WRITE WR */
@@ -209,7 +211,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
209 atomic_inc(&rdma_stat_write); 211 atomic_inc(&rdma_stat_write);
210 if (svc_rdma_send(xprt, &write_wr)) 212 if (svc_rdma_send(xprt, &write_wr))
211 goto err; 213 goto err;
212 return 0; 214 return write_len - bc;
213 err: 215 err:
214 svc_rdma_unmap_dma(ctxt); 216 svc_rdma_unmap_dma(ctxt);
215 svc_rdma_put_context(ctxt, 0); 217 svc_rdma_put_context(ctxt, 0);
@@ -225,7 +227,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
225{ 227{
226 u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; 228 u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
227 int write_len; 229 int write_len;
228 int max_write;
229 u32 xdr_off; 230 u32 xdr_off;
230 int chunk_off; 231 int chunk_off;
231 int chunk_no; 232 int chunk_no;
@@ -239,8 +240,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
239 res_ary = (struct rpcrdma_write_array *) 240 res_ary = (struct rpcrdma_write_array *)
240 &rdma_resp->rm_body.rm_chunks[1]; 241 &rdma_resp->rm_body.rm_chunks[1];
241 242
242 max_write = xprt->sc_max_sge * PAGE_SIZE;
243
244 /* Write chunks start at the pagelist */ 243 /* Write chunks start at the pagelist */
245 for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; 244 for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
246 xfer_len && chunk_no < arg_ary->wc_nchunks; 245 xfer_len && chunk_no < arg_ary->wc_nchunks;
@@ -260,23 +259,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
260 write_len); 259 write_len);
261 chunk_off = 0; 260 chunk_off = 0;
262 while (write_len) { 261 while (write_len) {
263 int this_write;
264 this_write = min(write_len, max_write);
265 ret = send_write(xprt, rqstp, 262 ret = send_write(xprt, rqstp,
266 ntohl(arg_ch->rs_handle), 263 ntohl(arg_ch->rs_handle),
267 rs_offset + chunk_off, 264 rs_offset + chunk_off,
268 xdr_off, 265 xdr_off,
269 this_write, 266 write_len,
270 vec); 267 vec);
271 if (ret) { 268 if (ret <= 0) {
272 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", 269 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
273 ret); 270 ret);
274 return -EIO; 271 return -EIO;
275 } 272 }
276 chunk_off += this_write; 273 chunk_off += ret;
277 xdr_off += this_write; 274 xdr_off += ret;
278 xfer_len -= this_write; 275 xfer_len -= ret;
279 write_len -= this_write; 276 write_len -= ret;
280 } 277 }
281 } 278 }
282 /* Update the req with the number of chunks actually used */ 279 /* Update the req with the number of chunks actually used */
@@ -293,7 +290,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
293{ 290{
294 u32 xfer_len = rqstp->rq_res.len; 291 u32 xfer_len = rqstp->rq_res.len;
295 int write_len; 292 int write_len;
296 int max_write;
297 u32 xdr_off; 293 u32 xdr_off;
298 int chunk_no; 294 int chunk_no;
299 int chunk_off; 295 int chunk_off;
@@ -311,8 +307,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
311 res_ary = (struct rpcrdma_write_array *) 307 res_ary = (struct rpcrdma_write_array *)
312 &rdma_resp->rm_body.rm_chunks[2]; 308 &rdma_resp->rm_body.rm_chunks[2];
313 309
314 max_write = xprt->sc_max_sge * PAGE_SIZE;
315
316 /* xdr offset starts at RPC message */ 310 /* xdr offset starts at RPC message */
317 nchunks = ntohl(arg_ary->wc_nchunks); 311 nchunks = ntohl(arg_ary->wc_nchunks);
318 for (xdr_off = 0, chunk_no = 0; 312 for (xdr_off = 0, chunk_no = 0;
@@ -330,24 +324,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
330 write_len); 324 write_len);
331 chunk_off = 0; 325 chunk_off = 0;
332 while (write_len) { 326 while (write_len) {
333 int this_write;
334
335 this_write = min(write_len, max_write);
336 ret = send_write(xprt, rqstp, 327 ret = send_write(xprt, rqstp,
337 ntohl(ch->rs_handle), 328 ntohl(ch->rs_handle),
338 rs_offset + chunk_off, 329 rs_offset + chunk_off,
339 xdr_off, 330 xdr_off,
340 this_write, 331 write_len,
341 vec); 332 vec);
342 if (ret) { 333 if (ret <= 0) {
343 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", 334 dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
344 ret); 335 ret);
345 return -EIO; 336 return -EIO;
346 } 337 }
347 chunk_off += this_write; 338 chunk_off += ret;
348 xdr_off += this_write; 339 xdr_off += ret;
349 xfer_len -= this_write; 340 xfer_len -= ret;
350 write_len -= this_write; 341 write_len -= ret;
351 } 342 }
352 } 343 }
353 /* Update the req with the number of chunks actually used */ 344 /* Update the req with the number of chunks actually used */