Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_sendto.c')
 net/sunrpc/xprtrdma/svc_rdma_sendto.c | 196 ++++++++++++++++++++---------
 1 file changed, 133 insertions(+), 63 deletions(-)

diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index df57f3ce6cd2..4f1b1c4f45f9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -50,9 +50,15 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
+static u32 xdr_padsize(u32 len)
+{
+	return (len & 3) ? (4 - (len & 3)) : 0;
+}
+
 int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
-		     struct svc_rdma_req_map *vec)
+		     struct svc_rdma_req_map *vec,
+		     bool write_chunk_present)
 {
 	int sge_no;
 	u32 sge_bytes;
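
The new xdr_padsize() helper encodes the XDR alignment rule: every variable-length XDR item is padded to a 4-byte boundary, so the pad is the distance from len to the next multiple of four, or zero when len is already aligned. A few illustrative values (pure arithmetic, not from the patch):

	xdr_padsize(0);    /* -> 0: already aligned */
	xdr_padsize(1);    /* -> 3: 1 & 3 == 1, pad = 4 - 1 */
	xdr_padsize(3);    /* -> 1 */
	xdr_padsize(4);    /* -> 0 */
	xdr_padsize(4091); /* -> 1: 4091 & 3 == 3, pad = 4 - 3 */
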
@@ -92,9 +98,20 @@ int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 
 	/* Tail SGE */
 	if (xdr->tail[0].iov_len) {
-		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
-		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
-		sge_no++;
+		unsigned char *base = xdr->tail[0].iov_base;
+		size_t len = xdr->tail[0].iov_len;
+		u32 xdr_pad = xdr_padsize(xdr->page_len);
+
+		if (write_chunk_present && xdr_pad) {
+			base += xdr_pad;
+			len -= xdr_pad;
+		}
+
+		if (len) {
+			vec->sge[sge_no].iov_base = base;
+			vec->sge[sge_no].iov_len = len;
+			sge_no++;
+		}
 	}
 
 	dprintk("svcrdma: %s: sge_no %d page_no %d "
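
This tail rework covers the case where the reply's pagelist is moved by a write chunk: the XDR pad that follows the pagelist is then neither written to the chunk nor wanted inline, so the tail SGE is advanced past it. As a worked example with assumed numbers: if page_len is 4091, the pad is one byte; with a write chunk present the tail's base moves forward one byte and its length shrinks by one, and a tail that contained only the pad byte (len reaching zero) produces no SGE at all.
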
@@ -166,10 +183,10 @@ svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
  * reply array is present
  */
 static struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
+			 struct rpcrdma_write_array *wr_ary)
 {
 	struct rpcrdma_read_chunk *rch;
-	struct rpcrdma_write_array *wr_ary;
 	struct rpcrdma_write_array *rp_ary;
 
 	/* XXX: Need to fix when reply chunk may occur with read list
@@ -191,7 +208,6 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
 			goto found_it;
 	}
 
-	wr_ary = svc_rdma_get_write_array(rmsgp);
 	if (wr_ary) {
 		int chunk = be32_to_cpu(wr_ary->wc_nchunks);
 
@@ -281,8 +297,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 
 	/* Prepare WRITE WR */
 	memset(&write_wr, 0, sizeof write_wr);
-	ctxt->wr_op = IB_WR_RDMA_WRITE;
-	write_wr.wr.wr_id = (unsigned long)ctxt;
+	ctxt->cqe.done = svc_rdma_wc_write;
+	write_wr.wr.wr_cqe = &ctxt->cqe;
 	write_wr.wr.sg_list = &sge[0];
 	write_wr.wr.num_sge = sge_no;
 	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
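
This hunk is part of the conversion from wr_id-based completion dispatch to the ib_cqe API: instead of stuffing a context pointer into wr_id, the work request carries a pointer to an embedded struct ib_cqe whose done callback the CQ polling code invokes directly. A minimal sketch of the handler side, assuming svc_rdma_op_ctxt embeds its ib_cqe in a field named cqe (as the assignment above implies; the real svc_rdma_wc_write lives with the transport code, not in this file):

	static void svc_rdma_wc_write(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* Recover the send context from the embedded ib_cqe */
		struct svc_rdma_op_ctxt *ctxt =
			container_of(wc->wr_cqe, struct svc_rdma_op_ctxt, cqe);

		svc_rdma_unmap_dma(ctxt);
		/* ... inspect wc->status, then release the context ... */
		svc_rdma_put_context(ctxt, 0);
	}
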
@@ -298,41 +314,37 @@
 err:
 	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 0);
-	/* Fatal error, close transport */
 	return -EIO;
 }
 
+noinline
 static int send_write_chunks(struct svcxprt_rdma *xprt,
-			     struct rpcrdma_msg *rdma_argp,
+			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
 {
-	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
+	u32 xfer_len = rqstp->rq_res.page_len;
 	int write_len;
 	u32 xdr_off;
 	int chunk_off;
 	int chunk_no;
 	int nchunks;
-	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
 
-	arg_ary = svc_rdma_get_write_array(rdma_argp);
-	if (!arg_ary)
-		return 0;
 	res_ary = (struct rpcrdma_write_array *)
 		&rdma_resp->rm_body.rm_chunks[1];
 
 	/* Write chunks start at the pagelist */
-	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
 	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
 		struct rpcrdma_segment *arg_ch;
 		u64 rs_offset;
 
-		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
+		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
 		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
@@ -350,11 +362,8 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
				 xdr_off,
				 write_len,
				 vec);
-		if (ret <= 0) {
-			dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
-				ret);
-			return -EIO;
-		}
+		if (ret <= 0)
+			goto out_err;
 		chunk_off += ret;
 		xdr_off += ret;
 		xfer_len -= ret;
@@ -364,11 +373,16 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 	/* Update the req with the number of chunks actually used */
 	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);
 
-	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
+	return rqstp->rq_res.page_len;
+
+out_err:
+	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
+	return -EIO;
 }
 
+noinline
 static int send_reply_chunks(struct svcxprt_rdma *xprt,
-			     struct rpcrdma_msg *rdma_argp,
+			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
@@ -380,25 +394,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	int chunk_off;
 	int nchunks;
 	struct rpcrdma_segment *ch;
-	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
 
-	arg_ary = svc_rdma_get_reply_array(rdma_argp);
-	if (!arg_ary)
-		return 0;
 	/* XXX: need to fix when reply lists occur with read-list and or
	 * write-list */
 	res_ary = (struct rpcrdma_write_array *)
 		&rdma_resp->rm_body.rm_chunks[2];
 
 	/* xdr offset starts at RPC message */
-	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
 		u64 rs_offset;
-		ch = &arg_ary->wc_array[chunk_no].wc_target;
+		ch = &rp_ary->wc_array[chunk_no].wc_target;
 		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
@@ -415,11 +425,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
				 xdr_off,
				 write_len,
				 vec);
-		if (ret <= 0) {
-			dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
-				ret);
-			return -EIO;
-		}
+		if (ret <= 0)
+			goto out_err;
 		chunk_off += ret;
 		xdr_off += ret;
 		xfer_len -= ret;
@@ -430,6 +437,10 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);
 
 	return rqstp->rq_res.len;
+
+out_err:
+	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
+	return -EIO;
 }
 
 /* This function prepares the portion of the RPCRDMA message to be
@@ -464,13 +475,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	int pages;
 	int ret;
 
-	/* Post a recv buffer to handle another request. */
-	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
+	ret = svc_rdma_repost_recv(rdma, GFP_KERNEL);
 	if (ret) {
-		printk(KERN_INFO
-		       "svcrdma: could not post a receive buffer, err=%d."
-		       "Closing transport %p.\n", ret, rdma);
-		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 		svc_rdma_put_context(ctxt, 0);
 		return -ENOTCONN;
 	}
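
The open-coded receive repost and its failure handling collapse into the shared svc_rdma_repost_recv() helper, which is introduced elsewhere in this series. Judging from the code it replaces, the helper plausibly amounts to the following sketch (inferred, not this patch's text):

	/* Sketch inferred from the removed lines; see the helper's
	 * actual definition in the svcrdma transport code.
	 */
	static int svc_rdma_repost_recv(struct svcxprt_rdma *rdma, gfp_t flags)
	{
		int ret;

		ret = svc_rdma_post_recv(rdma, flags);
		if (ret)
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		return ret;
	}

Either way, callers such as send_reply() no longer need to mark the transport closed themselves.
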
@@ -543,8 +549,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		goto err;
 	}
 	memset(&send_wr, 0, sizeof send_wr);
-	ctxt->wr_op = IB_WR_SEND;
-	send_wr.wr_id = (unsigned long)ctxt;
+	ctxt->cqe.done = svc_rdma_wc_send;
+	send_wr.wr_cqe = &ctxt->cqe;
 	send_wr.sg_list = ctxt->sge;
 	send_wr.num_sge = sge_no;
 	send_wr.opcode = IB_WR_SEND;
@@ -559,6 +565,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 err:
 	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
+	pr_err("svcrdma: failed to send reply, rc=%d\n", ret);
 	return -EIO;
 }
 
@@ -573,7 +580,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	struct rpcrdma_msg *rdma_argp;
 	struct rpcrdma_msg *rdma_resp;
-	struct rpcrdma_write_array *reply_ary;
+	struct rpcrdma_write_array *wr_ary, *rp_ary;
 	enum rpcrdma_proc reply_type;
 	int ret;
 	int inline_bytes;
@@ -587,12 +594,14 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
	 * places this at the start of page 0.
	 */
 	rdma_argp = page_address(rqstp->rq_pages[0]);
+	wr_ary = svc_rdma_get_write_array(rdma_argp);
+	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
 	/* Build an req vec for the XDR */
 	ctxt = svc_rdma_get_context(rdma);
 	ctxt->direction = DMA_TO_DEVICE;
 	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
+	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
 	if (ret)
 		goto err0;
 	inline_bytes = rqstp->rq_res.len;
@@ -603,8 +612,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (!res_page)
 		goto err0;
 	rdma_resp = page_address(res_page);
-	reply_ary = svc_rdma_get_reply_array(rdma_argp);
-	if (reply_ary)
+	if (rp_ary)
 		reply_type = RDMA_NOMSG;
 	else
 		reply_type = RDMA_MSG;
@@ -612,27 +620,26 @@
			 rdma_resp, reply_type);
 
 	/* Send any write-chunk data and build resp write-list */
-	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
-				rqstp, vec);
-	if (ret < 0) {
-		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
-		       ret);
-		goto err1;
+	if (wr_ary) {
+		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
+		if (ret < 0)
+			goto err1;
+		inline_bytes -= ret + xdr_padsize(ret);
 	}
-	inline_bytes -= ret;
 
 	/* Send any reply-list data and update resp reply-list */
-	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
-				rqstp, vec);
-	if (ret < 0) {
-		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
-		       ret);
-		goto err1;
+	if (rp_ary) {
+		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
+		if (ret < 0)
+			goto err1;
+		inline_bytes -= ret;
 	}
-	inline_bytes -= ret;
 
 	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
+	if (ret < 0)
+		goto err1;
+
 	svc_rdma_put_req_map(rdma, vec);
 	dprintk("svcrdma: send_reply returns %d\n", ret);
 	return ret;
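
The inline_bytes bookkeeping also changes here: the counter starts at rq_res.len, and each RDMA-offloaded region is subtracted so that send_reply() pushes only the remainder inline. Because a write chunk now moves only the pagelist (not the tail), the XDR pad must be subtracted explicitly, matching the pad skipped in svc_rdma_map_xdr(). With assumed numbers: head = 200 bytes, page_len = 4091, tail = 4, so rq_res.len = 4295; a write chunk moves the 4091 pagelist bytes, inline_bytes -= 4091 + xdr_padsize(4091) = 4092, leaving 203 bytes, which is exactly the head (200) plus the pad-trimmed tail (3) that the send path transmits inline.
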
@@ -642,5 +649,68 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 err0:
 	svc_rdma_put_req_map(rdma, vec);
 	svc_rdma_put_context(ctxt, 0);
-	return ret;
+	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+	return -ENOTCONN;
+}
+
+void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+			 int status)
+{
+	struct ib_send_wr err_wr;
+	struct page *p;
+	struct svc_rdma_op_ctxt *ctxt;
+	enum rpcrdma_errcode err;
+	__be32 *va;
+	int length;
+	int ret;
+
+	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
+	if (ret)
+		return;
+
+	p = alloc_page(GFP_KERNEL);
+	if (!p)
+		return;
+	va = page_address(p);
+
+	/* XDR encode an error reply */
+	err = ERR_CHUNK;
+	if (status == -EPROTONOSUPPORT)
+		err = ERR_VERS;
+	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
+
+	ctxt = svc_rdma_get_context(xprt);
+	ctxt->direction = DMA_TO_DEVICE;
+	ctxt->count = 1;
+	ctxt->pages[0] = p;
+
+	/* Prepare SGE for local address */
+	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
+	ctxt->sge[0].length = length;
+	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+					    p, 0, length, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
+		dprintk("svcrdma: Error mapping buffer for protocol error\n");
+		svc_rdma_put_context(ctxt, 1);
+		return;
+	}
+	atomic_inc(&xprt->sc_dma_used);
+
+	/* Prepare SEND WR */
+	memset(&err_wr, 0, sizeof(err_wr));
+	ctxt->cqe.done = svc_rdma_wc_send;
+	err_wr.wr_cqe = &ctxt->cqe;
+	err_wr.sg_list = ctxt->sge;
+	err_wr.num_sge = 1;
+	err_wr.opcode = IB_WR_SEND;
+	err_wr.send_flags = IB_SEND_SIGNALED;
+
+	/* Post It */
+	ret = svc_rdma_send(xprt, &err_wr);
+	if (ret) {
+		dprintk("svcrdma: Error %d posting send for protocol error\n",
+			ret);
+		svc_rdma_unmap_dma(ctxt);
+		svc_rdma_put_context(ctxt, 1);
+	}
 }