Diffstat (limited to 'net')

 net/sunrpc/xprtrdma/rpc_rdma.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 12db63580427..f877b88091ce 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	struct rpcrdma_read_chunk *cur_rchunk = NULL;
 	struct rpcrdma_write_array *warray = NULL;
 	struct rpcrdma_write_chunk *cur_wchunk = NULL;
-	u32 *iptr = headerp->rm_body.rm_chunks;
+	__be32 *iptr = headerp->rm_body.rm_chunks;
 
 	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
 		/* a read chunk - server will RDMA Read our memory */
@@ -217,7 +217,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
 			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
 			xdr_encode_hyper(
-					(u32 *)&cur_rchunk->rc_target.rs_offset,
+					(__be32 *)&cur_rchunk->rc_target.rs_offset,
 					seg->mr_base);
 			dprintk("RPC: %s: read chunk "
 				"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
@@ -229,7 +229,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
 			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
 			xdr_encode_hyper(
-					(u32 *)&cur_wchunk->wc_target.rs_offset,
+					(__be32 *)&cur_wchunk->wc_target.rs_offset,
 					seg->mr_base);
 			dprintk("RPC: %s: %s chunk "
 				"elem %d@0x%llx:0x%x (%s)\n", __func__,
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	 * finish off header. If write, marshal discrim and nchunks.
 	 */
 	if (cur_rchunk) {
-		iptr = (u32 *) cur_rchunk;
+		iptr = (__be32 *) cur_rchunk;
 		*iptr++ = xdr_zero;	/* finish the read chunk list */
 		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
 		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
 	} else {
 		warray->wc_discrim = xdr_one;
 		warray->wc_nchunks = htonl(nchunks);
-		iptr = (u32 *) cur_wchunk;
+		iptr = (__be32 *) cur_wchunk;
 		if (type == rpcrdma_writech) {
 			*iptr++ = xdr_zero; /* finish the write chunk list */
 			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
  * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
  */
 static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
+rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp)
 {
 	unsigned int i, total_len;
 	struct rpcrdma_write_chunk *cur_wchunk;
@@ -573,7 +573,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
 		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
 		ifdebug(FACILITY) {
 			u64 off;
-			xdr_decode_hyper((u32 *)&seg->rs_offset, &off);
+			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
 			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
 				__func__,
 				ntohl(seg->rs_length),
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
 	}
 	/* check and adjust for properly terminated write chunk */
 	if (wrchunk) {
-		u32 *w = (u32 *) cur_wchunk;
+		__be32 *w = (__be32 *) cur_wchunk;
 		if (*w++ != xdr_zero)
 			return -1;
 		cur_wchunk = (struct rpcrdma_write_chunk *) w;
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
 	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
 		return -1;
 
-	*iptrp = (u32 *) cur_wchunk;
+	*iptrp = (__be32 *) cur_wchunk;
 	return total_len;
 }
 
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	struct rpc_rqst *rqst;
 	struct rpc_xprt *xprt = rep->rr_xprt;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-	u32 *iptr;
+	__be32 *iptr;
 	int i, rdmalen, status;
 
 	/* Check status. If bad, signal disconnect and return rep to pool */
@@ -801,7 +801,7 @@ repost:
 			r_xprt->rx_stats.total_rdma_reply += rdmalen;
 		} else {
 			/* else ordinary inline */
-			iptr = (u32 *)((unsigned char *)headerp + 28);
+			iptr = (__be32 *)((unsigned char *)headerp + 28);
 			rep->rr_len -= 28; /*sizeof *headerp;*/
 			status = rep->rr_len;
 		}
@@ -816,7 +816,7 @@ repost:
 		    headerp->rm_body.rm_chunks[2] != xdr_one ||
 		    req->rl_nchunks == 0)
 			goto badheader;
-		iptr = (u32 *)((unsigned char *)headerp + 28);
+		iptr = (__be32 *)((unsigned char *)headerp + 28);
 		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
 		if (rdmalen < 0)
 			goto badheader;
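
Editorial note (not part of the patch): the point of replacing u32 with __be32 in these declarations, casts, and the rpcrdma_count_chunks() parameter is that __be32 is a sparse "bitwise" endianness-annotated type. With endian checking enabled (historically make C=2 CF=-D__CHECK_ENDIAN__, on by default in later kernels), sparse can then warn wherever a host-order value is mixed with an on-the-wire big-endian value without going through htonl()/ntohl() or cpu_to_be32()/be32_to_cpu(). The sketch below illustrates the kind of bug the annotation catches; demo_seg and demo_fill are made-up names, not code from this file.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_seg {
	__be32 rs_handle;	/* big-endian, as sent on the wire */
	u32 local_len;		/* host byte order, never transmitted */
};

static void demo_fill(struct demo_seg *seg, u32 rkey, u32 len)
{
	seg->rs_handle = htonl(rkey);	/* ok: htonl() yields a __be32 */
	seg->local_len = len;		/* ok: host order to host order */

	/*
	 * seg->rs_handle = rkey;
	 * would now draw "incorrect type in assignment" from sparse,
	 * because a host-order u32 is stored into a __be32 field.
	 */
}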