author    Fred Isaman <iisaman@netapp.com>  2012-04-20 14:47:44 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2012-04-27 14:10:37 -0400
commit    cd841605f7a721878d8a2d1362484723d8abf569 (patch)
tree      b5c37db575cd545a183577249909e042fe38d646  /fs/nfs/direct.c
parent    b5542849764aa56fd3f05c0041195b637b9d2ac2 (diff)
NFS: create common nfs_pgio_header for both read and write
In order to avoid duplicating all the data in nfs_read_data whenever we split it up into multiple RPC calls (either due to a short read result or due to rsize < PAGE_SIZE), we split out the bits that are the same per RPC call into a separate "header" structure.

The goal this patch moves towards is to have a single header refcounted by several rpc_data structures. Thus, we want to always refer from rpc_data to the header, and not the other way around.

This patch comes close to that ideal, but the directio code currently needs some special casing, isolated in the nfs_direct_[read_write]hdr_release() functions. This will be dealt with in a future patch.

Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
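For orientation, the struct layout implied by the &rhdr->rpc_data, &whdr->rpc_data, data->header and container_of(hdr, struct nfs_write_header, header) uses in the diff below is roughly the sketch that follows. This is only an inference from the patch itself, not the authoritative declarations, which live in the NFS headers outside this file.

    /* Sketch only: shape inferred from this patch, not the real declarations. */
    struct nfs_pgio_header {            /* per-I/O state shared by every RPC call */
            struct inode            *inode;
            struct rpc_cred         *cred;
            struct list_head        pages;  /* direct writes link headers on dreq->rewrite_list */
            struct nfs_page         *req;   /* direct I/O stashes the nfs_direct_req here */
    };

    struct nfs_read_header {
            struct nfs_pgio_header  header;         /* shared, per-I/O part */
            struct nfs_read_data    rpc_data;       /* per-RPC part; its ->header points back */
    };

    struct nfs_write_header {
            struct nfs_pgio_header  header;
            struct nfs_write_data   rpc_data;
    };

With this shape, code that only holds the rpc_data (such as the RPC release callbacks) reaches the shared state through data->header, while the new nfs_direct_readhdr_release()/nfs_direct_writehdr_release() helpers free the whole allocation through the embedded header.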
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c | 73
1 files changed, 50 insertions, 23 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fb7fbaa79c20..56176af1436f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -242,7 +242,7 @@ static void nfs_direct_read_release(void *calldata)
 {
 
 	struct nfs_read_data *data = calldata;
-	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *)data->header->req;
 	int status = data->task.tk_status;
 
 	spin_lock(&dreq->lock);
@@ -269,6 +269,15 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 	.rpc_release = nfs_direct_read_release,
 };
 
+static void nfs_direct_readhdr_release(struct nfs_read_header *rhdr)
+{
+	struct nfs_read_data *data = &rhdr->rpc_data;
+
+	if (data->pagevec != data->page_array)
+		kfree(data->pagevec);
+	nfs_readhdr_free(&rhdr->header);
+}
+
 /*
  * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
  * operation. If nfs_readdata_alloc() or get_user_pages() fails,
@@ -301,6 +310,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 	ssize_t started = 0;
 
 	do {
+		struct nfs_read_header *rhdr;
 		struct nfs_read_data *data;
 		size_t bytes;
 
@@ -308,23 +318,24 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		bytes = min(rsize,count);
 
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
-		if (unlikely(!data))
+		rhdr = nfs_readhdr_alloc(nfs_page_array_len(pgbase, bytes));
+		if (unlikely(!rhdr))
 			break;
+		data = &rhdr->rpc_data;
 
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 1, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_readdata_free(data);
+			nfs_direct_readhdr_release(rhdr);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_readdata_free(data);
+				nfs_direct_readhdr_release(rhdr);
 				break;
 			}
 			bytes -= pgbase;
@@ -333,9 +344,9 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 
 		get_dreq(dreq);
 
-		data->req = (struct nfs_page *) dreq;
-		data->inode = inode;
-		data->cred = msg.rpc_cred;
+		rhdr->header.req = (struct nfs_page *) dreq;
+		rhdr->header.inode = inode;
+		rhdr->header.cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = get_nfs_open_context(ctx);
 		data->args.lock_context = dreq->l_ctx;
@@ -447,13 +458,23 @@ out:
 	return result;
 }
 
+static void nfs_direct_writehdr_release(struct nfs_write_header *whdr)
+{
+	struct nfs_write_data *data = &whdr->rpc_data;
+
+	if (data->pagevec != data->page_array)
+		kfree(data->pagevec);
+	nfs_writehdr_free(&whdr->header);
+}
+
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
 	while (!list_empty(&dreq->rewrite_list)) {
-		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
-		list_del(&data->pages);
-		nfs_direct_release_pages(data->pagevec, data->npages);
-		nfs_writedata_free(data);
+		struct nfs_pgio_header *hdr = list_entry(dreq->rewrite_list.next, struct nfs_pgio_header, pages);
+		struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
+		list_del(&hdr->pages);
+		nfs_direct_release_pages(whdr->rpc_data.pagevec, whdr->rpc_data.npages);
+		nfs_direct_writehdr_release(whdr);
 	}
 }
 
@@ -463,6 +484,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	struct inode *inode = dreq->inode;
 	struct list_head *p;
 	struct nfs_write_data *data;
+	struct nfs_pgio_header *hdr;
 	struct rpc_task *task;
 	struct rpc_message msg = {
 		.rpc_cred = dreq->ctx->cred,
@@ -479,7 +501,8 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	get_dreq(dreq);
 
 	list_for_each(p, &dreq->rewrite_list) {
-		data = list_entry(p, struct nfs_write_data, pages);
+		hdr = list_entry(p, struct nfs_pgio_header, pages);
+		data = &(container_of(hdr, struct nfs_write_header, header))->rpc_data;
 
 		get_dreq(dreq);
 
@@ -652,7 +675,8 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 static void nfs_direct_write_release(void *calldata)
 {
 	struct nfs_write_data *data = calldata;
-	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+	struct nfs_pgio_header *hdr = data->header;
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *) hdr->req;
 	int status = data->task.tk_status;
 
 	spin_lock(&dreq->lock);
@@ -684,7 +708,7 @@ out_unlock:
 	spin_unlock(&dreq->lock);
 
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, data->inode);
+		nfs_direct_write_complete(dreq, hdr->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -725,6 +749,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 	ssize_t started = 0;
 
 	do {
+		struct nfs_write_header *whdr;
 		struct nfs_write_data *data;
 		size_t bytes;
 
@@ -732,23 +757,25 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		bytes = min(wsize,count);
 
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
-		if (unlikely(!data))
+		whdr = nfs_writehdr_alloc(nfs_page_array_len(pgbase, bytes));
+		if (unlikely(!whdr))
 			break;
 
+		data = &whdr->rpc_data;
+
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 0, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_writedata_free(data);
+			nfs_direct_writehdr_release(whdr);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_writedata_free(data);
+				nfs_direct_writehdr_release(whdr);
 				break;
 			}
 			bytes -= pgbase;
@@ -757,11 +784,11 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 
 		get_dreq(dreq);
 
-		list_move_tail(&data->pages, &dreq->rewrite_list);
+		list_move_tail(&whdr->header.pages, &dreq->rewrite_list);
 
-		data->req = (struct nfs_page *) dreq;
-		data->inode = inode;
-		data->cred = msg.rpc_cred;
+		whdr->header.req = (struct nfs_page *) dreq;
+		whdr->header.inode = inode;
+		whdr->header.cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.lock_context = dreq->l_ctx;