author    | Chuck Lever <chuck.lever@oracle.com>       | 2015-01-21 11:02:13 -0500
committer | Anna Schumaker <Anna.Schumaker@Netapp.com> | 2015-01-30 10:47:48 -0500
commit    | 284f4902a632584e8d73cf7d9363f819adf7240c (patch)
tree      | d9e0b2061ddb268c6297bd25e5342dad7a4eebe1 /net/sunrpc/xprtrdma
parent    | 8502427ccd9500cefc1ad47655371f9121934845 (diff)
xprtrdma: Modernize htonl and ntohl
xprtrdma: Modernize htonl and ntohl
Clean up: Replace htonl and ntohl with the be32 equivalents.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
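
For readers outside the sunrpc code, the pattern being applied is the standard kernel one for on-the-wire XDR fields: declare them `__be32`, write them with cpu_to_be32(), and read them with be32_to_cpu(), so that sparse endianness checking can catch mismatches that htonl()/ntohl() on untyped integers would not. Below is a minimal sketch of that pattern; the struct and function names (`example_hdr`, `example_encode`, `example_decode_credit`) are hypothetical, not the xprtrdma ones.

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire header: every field is big-endian on the wire,
 * so each one is declared with the sparse-checked __be32 type.
 */
struct example_hdr {
	__be32 xid;
	__be32 vers;
	__be32 credit;
};

static void example_encode(struct example_hdr *hdr, u32 xid, u32 credits)
{
	hdr->xid    = cpu_to_be32(xid);	    /* was: htonl(xid) */
	hdr->vers   = cpu_to_be32(1);
	hdr->credit = cpu_to_be32(credits); /* was: htonl(credits) */
}

static u32 example_decode_credit(const struct example_hdr *hdr)
{
	return be32_to_cpu(hdr->credit);    /* was: ntohl(hdr->credit) */
}
```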
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r-- | net/sunrpc/xprtrdma/rpc_rdma.c | 48
1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index df01d124936c..a6fb30b0a8cc 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -209,9 +209,11 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 		if (cur_rchunk) {	/* read */
 			cur_rchunk->rc_discrim = xdr_one;
 			/* all read chunks have the same "position" */
-			cur_rchunk->rc_position = htonl(pos);
-			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
-			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
+			cur_rchunk->rc_position = cpu_to_be32(pos);
+			cur_rchunk->rc_target.rs_handle =
+						cpu_to_be32(seg->mr_rkey);
+			cur_rchunk->rc_target.rs_length =
+						cpu_to_be32(seg->mr_len);
 			xdr_encode_hyper(
 					(__be32 *)&cur_rchunk->rc_target.rs_offset,
 					seg->mr_base);
@@ -222,8 +224,10 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 			cur_rchunk++;
 			r_xprt->rx_stats.read_chunk_count++;
 		} else {		/* write/reply */
-			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
-			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
+			cur_wchunk->wc_target.rs_handle =
+						cpu_to_be32(seg->mr_rkey);
+			cur_wchunk->wc_target.rs_length =
+						cpu_to_be32(seg->mr_len);
 			xdr_encode_hyper(
 					(__be32 *)&cur_wchunk->wc_target.rs_offset,
 					seg->mr_base);
@@ -257,7 +261,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
 	} else {
 		warray->wc_discrim = xdr_one;
-		warray->wc_nchunks = htonl(nchunks);
+		warray->wc_nchunks = cpu_to_be32(nchunks);
 		iptr = (__be32 *) cur_wchunk;
 		if (type == rpcrdma_writech) {
 			*iptr++ = xdr_zero; /* finish the write chunk list */
@@ -404,11 +408,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 
 	/* build RDMA header in private area at front */
 	headerp = (struct rpcrdma_msg *) req->rl_base;
-	/* don't htonl XID, it's already done in request */
+	/* don't byte-swap XID, it's already done in request */
 	headerp->rm_xid = rqst->rq_xid;
-	headerp->rm_vers = xdr_one;
-	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
-	headerp->rm_type = htonl(RDMA_MSG);
+	headerp->rm_vers = rpcrdma_version;
+	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
+	headerp->rm_type = rdma_msg;
 
 	/*
 	 * Chunks needed for results?
@@ -482,11 +486,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 					RPCRDMA_INLINE_PAD_VALUE(rqst));
 
 	if (padlen) {
-		headerp->rm_type = htonl(RDMA_MSGP);
+		headerp->rm_type = rdma_msgp;
 		headerp->rm_body.rm_padded.rm_align =
-			htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
+			cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
 		headerp->rm_body.rm_padded.rm_thresh =
-			htonl(RPCRDMA_INLINE_PAD_THRESH);
+			cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
 		headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
 		headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 		headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
@@ -570,7 +574,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
 	unsigned int i, total_len;
 	struct rpcrdma_write_chunk *cur_wchunk;
 
-	i = ntohl(**iptrp);	/* get array count */
+	i = be32_to_cpu(**iptrp);
 	if (i > max)
 		return -1;
 	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
@@ -582,11 +586,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
 			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
 			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
 				__func__,
-				ntohl(seg->rs_length),
+				be32_to_cpu(seg->rs_length),
 				(unsigned long long)off,
-				ntohl(seg->rs_handle));
+				be32_to_cpu(seg->rs_handle));
 		}
-		total_len += ntohl(seg->rs_length);
+		total_len += be32_to_cpu(seg->rs_length);
 		++cur_wchunk;
 	}
 	/* check and adjust for properly terminated write chunk */
@@ -749,9 +753,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 		goto repost;
 	}
 	headerp = (struct rpcrdma_msg *) rep->rr_base;
-	if (headerp->rm_vers != xdr_one) {
+	if (headerp->rm_vers != rpcrdma_version) {
 		dprintk("RPC: %s: invalid version %d\n",
-			__func__, ntohl(headerp->rm_vers));
+			__func__, be32_to_cpu(headerp->rm_vers));
 		goto repost;
 	}
 
@@ -793,7 +797,7 @@ repost:
 	/* check for expected message types */
 	/* The order of some of these tests is important. */
 	switch (headerp->rm_type) {
-	case htonl(RDMA_MSG):
+	case rdma_msg:
 		/* never expect read chunks */
 		/* never expect reply chunks (two ways to check) */
 		/* never expect write chunks without having offered RDMA */
@@ -832,7 +836,7 @@ repost:
 		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
 		break;
 
-	case htonl(RDMA_NOMSG):
+	case rdma_nomsg:
 		/* never expect read or write chunks, always reply chunks */
 		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
 		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
@@ -853,7 +857,7 @@ badheader:
 		dprintk("%s: invalid rpcrdma reply header (type %d):"
 			" chunks[012] == %d %d %d"
 			" expected chunks <= %d\n",
-			__func__, ntohl(headerp->rm_type),
+			__func__, be32_to_cpu(headerp->rm_type),
 			headerp->rm_body.rm_chunks[0],
 			headerp->rm_body.rm_chunks[1],
 			headerp->rm_body.rm_chunks[2],
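
One detail worth noting in the rm_vers/rm_type hunks above: instead of byte-swapping at runtime with htonl(), the new code assigns and compares symbolic constants (rpcrdma_version, rdma_msg, rdma_msgp, rdma_nomsg). For `case rdma_msg:` to be a valid case label, those constants must already be compile-time, big-endian values; their exact definitions live in the sunrpc headers and are not quoted here. The sketch below is an assumption-laden illustration of the technique only, with hypothetical names.

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical pre-byte-swapped protocol constants. cpu_to_be32() on a
 * compile-time constant folds to a constant, so these can be used as
 * case labels while still carrying big-endian (wire-order) values.
 */
#define EXAMPLE_MSG	0
#define EXAMPLE_NOMSG	1

#define example_msg	cpu_to_be32(EXAMPLE_MSG)
#define example_nomsg	cpu_to_be32(EXAMPLE_NOMSG)

static int example_classify(__be32 rm_type)
{
	switch (rm_type) {
	case example_msg:	/* RPC message carried inline */
		return 0;
	case example_nomsg:	/* body delivered via a reply chunk */
		return 1;
	default:		/* unexpected header type */
		return -1;
	}
}
```

The rm_vers check works the same way: rpcrdma_version is a big-endian constant, replacing the less descriptive xdr_one that the old code compared against (both encode the value 1, since RPC/RDMA version 1 is the only version).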