path: root/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-21 00:21:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-21 00:21:46 -0400
commit	14b395e35d1afdd8019d11b92e28041fad591b71
tree	cff7ba9bed7a38300b19a5bacc632979d64fd9c8 /net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
parent	734b397cd14f3340394a8dd3266bec97d01f034b
parent	5108b27651727b5aba0826e8fd7be71b42428701
Merge branch 'for-2.6.27' of git://linux-nfs.org/~bfields/linux
* 'for-2.6.27' of git://linux-nfs.org/~bfields/linux: (51 commits)
  nfsd: nfs4xdr.c do-while is not a compound statement
  nfsd: Use C99 initializers in fs/nfsd/nfs4xdr.c
  lockd: Pass "struct sockaddr *" to new failover-by-IP function
  lockd: get host reference in nlmsvc_create_block() instead of callers
  lockd: minor svclock.c style fixes
  lockd: eliminate duplicate nlmsvc_lookup_host call from nlmsvc_lock
  lockd: eliminate duplicate nlmsvc_lookup_host call from nlmsvc_testlock
  lockd: nlm_release_host() checks for NULL, caller needn't
  file lock: reorder struct file_lock to save space on 64 bit builds
  nfsd: take file and mnt write in nfs4_upgrade_open
  nfsd: document open share bit tracking
  nfsd: tabulate nfs4 xdr encoding functions
  nfsd: dprint operation names
  svcrdma: Change WR context get/put to use the kmem cache
  svcrdma: Create a kmem cache for the WR contexts
  svcrdma: Add flush_scheduled_work to module exit function
  svcrdma: Limit ORD based on client's advertised IRD
  svcrdma: Remove unused wait q from svcrdma_xprt structure
  svcrdma: Remove unneeded spin locks from __svc_rdma_free
  svcrdma: Add dma map count and WARN_ON
  ...
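Two of the svcrdma entries above ("Create a kmem cache for the WR contexts", "Change WR context get/put to use the kmem cache") move work-request context allocation onto a dedicated slab cache. Below is a minimal sketch of that kernel pattern, assuming hypothetical names (wr_ctxt, wr_ctxt_cache, the get/put helpers are stand-ins, not the symbols those patches introduce):

	/* Sketch of the kmem-cache get/put pattern adopted by the svcrdma
	 * commits above; all names here are hypothetical stand-ins. */
	#include <linux/module.h>
	#include <linux/slab.h>

	struct wr_ctxt {			/* placeholder for the WR context */
		int count;
	};

	static struct kmem_cache *wr_ctxt_cache;

	static int __init wr_ctxt_init(void)
	{
		/* One slab cache, created once at module load */
		wr_ctxt_cache = kmem_cache_create("wr_ctxt_cache",
						  sizeof(struct wr_ctxt),
						  0, 0, NULL);
		return wr_ctxt_cache ? 0 : -ENOMEM;
	}

	static void __exit wr_ctxt_exit(void)
	{
		kmem_cache_destroy(wr_ctxt_cache);
	}

	static struct wr_ctxt *wr_ctxt_get(void)
	{
		return kmem_cache_alloc(wr_ctxt_cache, GFP_KERNEL);
	}

	static void wr_ctxt_put(struct wr_ctxt *ctxt)
	{
		kmem_cache_free(wr_ctxt_cache, ctxt);
	}

	module_init(wr_ctxt_init);
	module_exit(wr_ctxt_exit);
	MODULE_LICENSE("GPL");

A slab cache sized exactly for the context struct avoids repeated kmalloc round-ups and gives per-CPU hot-object reuse on the receive path.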
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_recvfrom.c')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	84
1 file changed, 39 insertions(+), 45 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 06ab4841537b..b4b17f44cb29 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -112,11 +112,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	rqstp->rq_arg.tail[0].iov_len = 0;
 }
 
-struct chunk_sge {
-	int start;		/* sge no for this chunk */
-	int count;		/* sge count for this chunk */
-};
-
 /* Encode a read-chunk-list as an array of IB SGE
  *
  * Assumptions:
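The hunk above drops the file-local struct chunk_sge; its job passes to the shared svc_rdma_req_map used throughout the rest of this diff. Inferring only from the accessors the patch uses (chl_map->ch[n].start/.count and rpl_map->sge[n].iov_base/.iov_len), the map plausibly has a shape like the sketch below; the authoritative definition lives in the svc_rdma headers and is not reproduced here:

	/* Plausible shape of svc_rdma_req_map, inferred from its use in
	 * this diff, NOT copied from svc_rdma.h: one scratch allocation
	 * viewed either as a kvec array (the reply map) or as chunk
	 * start/count pairs (the chunk map). */
	struct svc_rdma_chunk_sge {
		int start;	/* first sge index belonging to this chunk */
		int count;	/* number of sge in this chunk */
	};

	struct svc_rdma_req_map {
		union {
			struct kvec sge[RPCSVC_MAXPAGES];
			struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
		};
	};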
@@ -134,8 +129,8 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 			   struct svc_rqst *rqstp,
 			   struct svc_rdma_op_ctxt *head,
 			   struct rpcrdma_msg *rmsgp,
-			   struct ib_sge *sge,
-			   struct chunk_sge *ch_sge_ary,
+			   struct svc_rdma_req_map *rpl_map,
+			   struct svc_rdma_req_map *chl_map,
 			   int ch_count,
 			   int byte_count)
 {
@@ -156,22 +151,18 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->arg.pages = &head->pages[head->count];
-	head->sge[0].length = head->count; /* save count of hdr pages */
+	head->hdr_count = head->count; /* save count of hdr pages */
 	head->arg.page_base = 0;
 	head->arg.page_len = ch_bytes;
 	head->arg.len = rqstp->rq_arg.len + ch_bytes;
 	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
 	head->count++;
-	ch_sge_ary[0].start = 0;
+	chl_map->ch[0].start = 0;
 	while (byte_count) {
+		rpl_map->sge[sge_no].iov_base =
+			page_address(rqstp->rq_arg.pages[page_no]) + page_off;
 		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
-		sge[sge_no].addr =
-			ib_dma_map_page(xprt->sc_cm_id->device,
-					rqstp->rq_arg.pages[page_no],
-					page_off, sge_bytes,
-					DMA_FROM_DEVICE);
-		sge[sge_no].length = sge_bytes;
-		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
+		rpl_map->sge[sge_no].iov_len = sge_bytes;
 		/*
 		 * Don't bump head->count here because the same page
 		 * may be used by multiple SGE.
@@ -187,11 +178,11 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 		 * SGE, move to the next SGE
 		 */
 		if (ch_bytes == 0) {
-			ch_sge_ary[ch_no].count =
-				sge_no - ch_sge_ary[ch_no].start;
+			chl_map->ch[ch_no].count =
+				sge_no - chl_map->ch[ch_no].start;
 			ch_no++;
 			ch++;
-			ch_sge_ary[ch_no].start = sge_no;
+			chl_map->ch[ch_no].start = sge_no;
 			ch_bytes = ch->rc_target.rs_length;
 			/* If bytes remaining account for next chunk */
 			if (byte_count) {
@@ -220,18 +211,25 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 	return sge_no;
 }
 
-static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
-			      struct ib_sge *sge,
+static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
+			      struct svc_rdma_op_ctxt *ctxt,
+			      struct kvec *vec,
 			      u64 *sgl_offset,
 			      int count)
 {
 	int i;
 
 	ctxt->count = count;
+	ctxt->direction = DMA_FROM_DEVICE;
 	for (i = 0; i < count; i++) {
-		ctxt->sge[i].addr = sge[i].addr;
-		ctxt->sge[i].length = sge[i].length;
-		*sgl_offset = *sgl_offset + sge[i].length;
+		atomic_inc(&xprt->sc_dma_used);
+		ctxt->sge[i].addr =
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  vec[i].iov_base, vec[i].iov_len,
+					  DMA_FROM_DEVICE);
+		ctxt->sge[i].length = vec[i].iov_len;
+		ctxt->sge[i].lkey = xprt->sc_phys_mr->lkey;
+		*sgl_offset = *sgl_offset + vec[i].iov_len;
 	}
 }
 
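The rewritten rdma_set_ctxt_sge() now owns the DMA mapping: each kvec element is mapped with ib_dma_map_single(), the transport's sc_dma_used counter is bumped (the accounting behind the "Add dma map count and WARN_ON" commit in this merge), and ctxt->direction records how to unmap later. A hedged sketch of the matching teardown follows; the helper name is illustrative and not necessarily the one in svc_rdma_transport.c, though the fields it touches all appear in this diff:

	/* Illustrative unmap side: every ib_dma_map_single() above must be
	 * undone with the same device, length and direction, and the
	 * sc_dma_used counter decremented so the WARN_ON accounting
	 * balances. Helper name is hypothetical. */
	static void sketch_unmap_ctxt(struct svcxprt_rdma *xprt,
				      struct svc_rdma_op_ctxt *ctxt)
	{
		int i;

		for (i = 0; i < ctxt->count; i++) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_single(xprt->sc_cm_id->device,
					    ctxt->sge[i].addr,
					    ctxt->sge[i].length,
					    ctxt->direction);
		}
	}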
@@ -282,34 +280,29 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	struct ib_send_wr read_wr;
 	int err = 0;
 	int ch_no;
-	struct ib_sge *sge;
 	int ch_count;
 	int byte_count;
 	int sge_count;
 	u64 sgl_offset;
 	struct rpcrdma_read_chunk *ch;
 	struct svc_rdma_op_ctxt *ctxt = NULL;
-	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
-	struct svc_rdma_op_ctxt *tmp_ch_ctxt;
-	struct chunk_sge *ch_sge_ary;
+	struct svc_rdma_req_map *rpl_map;
+	struct svc_rdma_req_map *chl_map;
 
 	/* If no read list is present, return 0 */
 	ch = svc_rdma_get_read_chunk(rmsgp);
 	if (!ch)
 		return 0;
 
-	/* Allocate temporary contexts to keep SGE */
-	BUG_ON(sizeof(struct ib_sge) < sizeof(struct chunk_sge));
-	tmp_sge_ctxt = svc_rdma_get_context(xprt);
-	sge = tmp_sge_ctxt->sge;
-	tmp_ch_ctxt = svc_rdma_get_context(xprt);
-	ch_sge_ary = (struct chunk_sge *)tmp_ch_ctxt->sge;
+	/* Allocate temporary reply and chunk maps */
+	rpl_map = svc_rdma_get_req_map();
+	chl_map = svc_rdma_get_req_map();
 
 	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
 	if (ch_count > RPCSVC_MAXPAGES)
 		return -EINVAL;
 	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
-				    sge, ch_sge_ary,
+				    rpl_map, chl_map,
 				    ch_count, byte_count);
 	sgl_offset = 0;
 	ch_no = 0;
@@ -331,14 +324,15 @@ next_sge:
 		read_wr.wr.rdma.remote_addr =
 			get_unaligned(&(ch->rc_target.rs_offset)) +
 			sgl_offset;
-		read_wr.sg_list = &sge[ch_sge_ary[ch_no].start];
+		read_wr.sg_list = ctxt->sge;
 		read_wr.num_sge =
-			rdma_read_max_sge(xprt, ch_sge_ary[ch_no].count);
-		rdma_set_ctxt_sge(ctxt, &sge[ch_sge_ary[ch_no].start],
+			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
+		rdma_set_ctxt_sge(xprt, ctxt,
+				  &rpl_map->sge[chl_map->ch[ch_no].start],
 				  &sgl_offset,
 				  read_wr.num_sge);
 		if (((ch+1)->rc_discrim == 0) &&
-		    (read_wr.num_sge == ch_sge_ary[ch_no].count)) {
+		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
 			/*
 			 * Mark the last RDMA_READ with a bit to
 			 * indicate all RPC data has been fetched from
@@ -358,9 +352,9 @@ next_sge:
 		}
 		atomic_inc(&rdma_stat_read);
 
-		if (read_wr.num_sge < ch_sge_ary[ch_no].count) {
-			ch_sge_ary[ch_no].count -= read_wr.num_sge;
-			ch_sge_ary[ch_no].start += read_wr.num_sge;
+		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+			chl_map->ch[ch_no].count -= read_wr.num_sge;
+			chl_map->ch[ch_no].start += read_wr.num_sge;
 			goto next_sge;
 		}
 		sgl_offset = 0;
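The remainder handling above splits one read chunk across several RDMA_READ work requests when the chunk carries more SGEs than rdma_read_max_sge() allows: each pass consumes num_sge entries, advances the chunk's start, shrinks its count, and jumps back to next_sge. The standalone model below reproduces just that slicing arithmetic (plain userspace C with hypothetical names, no RDMA involved):

	/* Userspace model of the next_sge loop: a chunk of `count` SGEs is
	 * issued as work requests of at most `max_sge` entries each. */
	#include <stdio.h>

	static void issue_reads(int start, int count, int max_sge)
	{
		while (count > 0) {
			int num_sge = count < max_sge ? count : max_sge;

			printf("RDMA_READ: sge %d..%d\n",
			       start, start + num_sge - 1);
			start += num_sge;  /* chl_map->ch[ch_no].start += ... */
			count -= num_sge;  /* chl_map->ch[ch_no].count -= ... */
		}
	}

	int main(void)
	{
		issue_reads(0, 10, 4);	/* emits reads of 4, 4 and 2 SGEs */
		return 0;
	}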
@@ -368,8 +362,8 @@ next_sge:
 	}
 
 out:
-	svc_rdma_put_context(tmp_sge_ctxt, 0);
-	svc_rdma_put_context(tmp_ch_ctxt, 0);
+	svc_rdma_put_req_map(rpl_map);
+	svc_rdma_put_req_map(chl_map);
 
 	/* Detach arg pages. svc_recv will replenish them */
 	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
@@ -399,7 +393,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 		rqstp->rq_pages[page_no] = head->pages[page_no];
 	}
 	/* Point rq_arg.pages past header */
-	rqstp->rq_arg.pages = &rqstp->rq_pages[head->sge[0].length];
+	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
 	rqstp->rq_arg.page_len = head->arg.page_len;
 	rqstp->rq_arg.page_base = head->arg.page_base;
 