author    Tom Tucker <tom@ogc.us>                 2012-02-15 12:30:00 -0500
committer J. Bruce Fields <bfields@redhat.com>    2012-02-17 18:38:50 -0500
commit    cec56c8ff5e28f58ff13041dca7853738ae577a1 (patch)
tree      fb21c7337e046cb2361faca4931907e28fadcb5d /net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
parent    2c8bd7e0d1b66b2f8f267fd6ab62a30569c792c0 (diff)
svcrdma: Cleanup sparse warnings in the svcrdma module
The svcrdma transport was un-marshalling requests in-place. This resulted in sparse warnings because __beXX fields ended up holding both network-byte-order (NBO) and host-byte-order (HBO) data. The code has been restructured to do the byte-swapping as the header is parsed, instead of when the header is validated immediately after receipt. Also moved the extern declarations for the workqueue and memory pools to the private header file.

Signed-off-by: Tom Tucker <tom@ogc.us>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
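For illustration, here is a minimal sketch of the warning class being cleaned up; the type and function names below are hypothetical stand-ins, not code from this patch. sparse flags any assignment that mixes a wire-order __be32 lvalue with a host-order value, which is exactly what in-place un-marshalling produces:

	#include <linux/types.h>	/* __be32, u32 */
	#include <asm/byteorder.h>	/* ntohl() and friends */

	struct seg_example {			/* hypothetical stand-in type */
		__be32 rs_length;		/* network byte order on the wire */
	};

	/* In-place un-marshalling: sparse warns "incorrect type in
	 * assignment" because a host-order result is stored back into a
	 * __be32 slot, making the field's byte order ambiguous thereafter. */
	static void unmarshal_in_place(struct seg_example *seg)
	{
		seg->rs_length = ntohl(seg->rs_length);
	}

	/* Swap-as-you-parse: the header stays in wire order and the
	 * returned value is plainly host order -- sparse-clean. */
	static u32 parse_length(const struct seg_example *seg)
	{
		return ntohl(seg->rs_length);
	}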
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_recvfrom.c')
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index df67211c4baf..41cb63b623df 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 	page_off = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	ch_no = 0;
-	ch_bytes = ch->rc_target.rs_length;
+	ch_bytes = ntohl(ch->rc_target.rs_length);
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 			ch_no++;
 			ch++;
 			chl_map->ch[ch_no].start = sge_no;
-			ch_bytes = ch->rc_target.rs_length;
+			ch_bytes = ntohl(ch->rc_target.rs_length);
 			/* If bytes remaining account for next chunk */
 			if (byte_count) {
 				head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
 	offset = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	for (ch_no = 0; ch_no < ch_count; ch_no++) {
+		int len = ntohl(ch->rc_target.rs_length);
 		rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
-		rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
+		rpl_map->sge[ch_no].iov_len = len;
 		chl_map->ch[ch_no].count = 1;
 		chl_map->ch[ch_no].start = ch_no;
-		offset += ch->rc_target.rs_length;
+		offset += len;
 		ch++;
 	}
 
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 	for (i = 0; i < count; i++) {
 		ctxt->sge[i].length = 0; /* in case map fails */
 		if (!frmr) {
-			BUG_ON(0 == virt_to_page(vec[i].iov_base));
+			BUG_ON(!virt_to_page(vec[i].iov_base));
 			off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
 			ctxt->sge[i].addr =
 				ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 
 	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	     ch->rc_discrim != 0; ch++, ch_no++) {
+		u64 rs_offset;
 next_sge:
 		ctxt = svc_rdma_get_context(xprt);
 		ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ next_sge:
 		read_wr.opcode = IB_WR_RDMA_READ;
 		ctxt->wr_op = read_wr.opcode;
 		read_wr.send_flags = IB_SEND_SIGNALED;
-		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
-		read_wr.wr.rdma.remote_addr =
-			get_unaligned(&(ch->rc_target.rs_offset)) +
-			sgl_offset;
+		read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
+		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
+				 &rs_offset);
+		read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
 		read_wr.sg_list = ctxt->sge;
 		read_wr.num_sge =
 			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
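A note on the last hunk: rs_offset is a 64-bit XDR "hyper" spanning two 32-bit big-endian words, and the old get_unaligned() read it without any byte-order annotation, which is what sparse objected to. xdr_decode_hyper() from include/linux/sunrpc/xdr.h makes the conversion explicit while assembling the value; roughly (a sketch of the in-tree helper of that era, shown for reference -- see the header for the authoritative definition):

	static inline __be32 *xdr_decode_hyper(__be32 *p, __u64 *valp)
	{
		*valp = ((__u64) ntohl(*p++)) << 32;	/* high word first (XDR order) */
		*valp |= ntohl(*p++);			/* then the low word */
		return p;				/* points past both words */
	}

With rs_offset decoded into host order, the remote_addr computation above is well-typed on both little- and big-endian hosts.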