aboutsummaryrefslogtreecommitdiffstats
path: root/fs/nfsd
diff options
context:
space:
mode:
authorAndy Adamson <andros@netapp.com>2009-04-03 01:28:15 -0400
committerJ. Bruce Fields <bfields@citi.umich.edu>2009-04-03 20:41:17 -0400
commit074fe897536f095309c5aaffcf912952882ab2cb (patch)
treed4bb2fff645a2df1dbdec4af6787359d888a3695 /fs/nfsd
parentf9bb94c4c60f6e1d1717077bfddb614f03a607d1 (diff)
nfsd41: DRC save, restore, and clear functions
Cache all the result pages, including the rpc header in rq_respages[0], for a request in the slot table cache entry. Cache the statp pointer from nfsd_dispatch which points into rq_respages[0] just past the rpc header. When setting a cache entry, calculate and save the length of the nfs data minus the rpc header for rq_respages[0]. When replaying a cache entry, replace the cached rpc header with the replayed request rpc result header, unless there is not enough room in the cached results first page. In that case, use the cached rpc header. The sessions fore channel maxresponse size cached is set to NFSD_PAGES_PER_SLOT * PAGE_SIZE. For compounds we are cacheing with operations such as READDIR that use the xdr_buf->pages to hold data, we choose to cache the extra page of data rather than copying data from xdr_buf->pages into the xdr_buf->head page. [nfsd41: limit cache to maxresponsesize_cached] [nfsd41: mv nfsd4_set_statp under CONFIG_NFSD_V4_1] [nfsd41: rename nfsd4_move_pages] [nfsd41: rename page_no variable] [nfsd41: rename nfsd4_set_cache_entry] [nfsd41: fix nfsd41_copy_replay_data comment] [nfsd41: add to nfsd4_set_cache_entry] Signed-off-by: Andy Adamson <andros@netapp.com> Signed-off-by: Benny Halevy <bhalevy@panasas.com> Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'fs/nfsd')
-rw-r--r--fs/nfsd/nfs4state.c142
-rw-r--r--fs/nfsd/nfssvc.c4
2 files changed, 146 insertions, 0 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9243dca3576c..a37b91dab1bf 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -852,6 +852,148 @@ out_err:
852 return; 852 return;
853} 853}
854 854
855void
856nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
857{
858 struct nfsd4_compoundres *resp = rqstp->rq_resp;
859
860 resp->cstate.statp = statp;
861}
862
863/*
864 * Dereference the result pages.
865 */
866static void
867nfsd4_release_respages(struct page **respages, short resused)
868{
869 int i;
870
871 dprintk("--> %s\n", __func__);
872 for (i = 0; i < resused; i++) {
873 if (!respages[i])
874 continue;
875 put_page(respages[i]);
876 respages[i] = NULL;
877 }
878}
879
/*
 * Copy page pointers from one array to another, taking a reference on
 * each page copied (NULL slots are copied through untouched).
 */
static void
nfsd4_copy_pages(struct page **topages, struct page **frompages, short count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = frompages[i];

		topages[i] = page;
		if (page)
			get_page(page);
	}
}
892
893/*
894 * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous
895 * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total
896 * length of the XDR response is less than se_fmaxresp_cached
897 * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a
898 * of the reply (e.g. readdir).
899 *
900 * Store the base and length of the rq_req.head[0] page
901 * of the NFSv4.1 data, just past the rpc header.
902 */
903void
904nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
905{
906 struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
907 struct svc_rqst *rqstp = resp->rqstp;
908 struct nfsd4_compoundargs *args = rqstp->rq_argp;
909 struct nfsd4_op *op = &args->ops[resp->opcnt];
910 struct kvec *resv = &rqstp->rq_res.head[0];
911
912 dprintk("--> %s entry %p\n", __func__, entry);
913
914 /* Don't cache a failed OP_SEQUENCE. */
915 if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status)
916 return;
917 nfsd4_release_respages(entry->ce_respages, entry->ce_resused);
918 entry->ce_resused = rqstp->rq_resused;
919 if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1)
920 entry->ce_resused = NFSD_PAGES_PER_SLOT + 1;
921 nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages,
922 entry->ce_resused);
923 entry->ce_status = resp->cstate.status;
924 entry->ce_datav.iov_base = resp->cstate.statp;
925 entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp -
926 (char *)page_address(rqstp->rq_respages[0]));
927 entry->ce_opcnt = resp->opcnt;
928 /* Current request rpc header length*/
929 entry->ce_rpchdrlen = (char *)resp->cstate.statp -
930 (char *)page_address(rqstp->rq_respages[0]);
931}
932
933/*
934 * We keep the rpc header, but take the nfs reply from the replycache.
935 */
936static int
937nfsd41_copy_replay_data(struct nfsd4_compoundres *resp,
938 struct nfsd4_cache_entry *entry)
939{
940 struct svc_rqst *rqstp = resp->rqstp;
941 struct kvec *resv = &resp->rqstp->rq_res.head[0];
942 int len;
943
944 /* Current request rpc header length*/
945 len = (char *)resp->cstate.statp -
946 (char *)page_address(rqstp->rq_respages[0]);
947 if (entry->ce_datav.iov_len + len > PAGE_SIZE) {
948 dprintk("%s v41 cached reply too large (%Zd).\n", __func__,
949 entry->ce_datav.iov_len);
950 return 0;
951 }
952 /* copy the cached reply nfsd data past the current rpc header */
953 memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base,
954 entry->ce_datav.iov_len);
955 resv->iov_len = len + entry->ce_datav.iov_len;
956 return 1;
957}
958
959/*
960 * Keep the first page of the replay. Copy the NFSv4.1 data from the first
961 * cached page. Replace any futher replay pages from the cache.
962 */
963__be32
964nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp)
965{
966 struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
967 __be32 status;
968
969 dprintk("--> %s entry %p\n", __func__, entry);
970
971
972 if (!nfsd41_copy_replay_data(resp, entry)) {
973 /*
974 * Not enough room to use the replay rpc header, send the
975 * cached header. Release all the allocated result pages.
976 */
977 svc_free_res_pages(resp->rqstp);
978 nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages,
979 entry->ce_resused);
980 } else {
981 /* Release all but the first allocated result page */
982
983 resp->rqstp->rq_resused--;
984 svc_free_res_pages(resp->rqstp);
985
986 nfsd4_copy_pages(&resp->rqstp->rq_respages[1],
987 &entry->ce_respages[1],
988 entry->ce_resused - 1);
989 }
990
991 resp->rqstp->rq_resused = entry->ce_resused;
992 status = entry->ce_status;
993
994 return status;
995}
996
855/* 997/*
856 * Set the exchange_id flags returned by the server. 998 * Set the exchange_id flags returned by the server.
857 */ 999 */
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ef0a3686639d..b5168d1898ec 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -515,6 +515,10 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
515 + rqstp->rq_res.head[0].iov_len; 515 + rqstp->rq_res.head[0].iov_len;
516 rqstp->rq_res.head[0].iov_len += sizeof(__be32); 516 rqstp->rq_res.head[0].iov_len += sizeof(__be32);
517 517
518 /* NFSv4.1 DRC requires statp */
519 if (rqstp->rq_vers == 4)
520 nfsd4_set_statp(rqstp, statp);
521
518 /* Now call the procedure handler, and encode NFS status. */ 522 /* Now call the procedure handler, and encode NFS status. */
519 nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); 523 nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
520 nfserr = map_new_errors(rqstp->rq_vers, nfserr); 524 nfserr = map_new_errors(rqstp->rq_vers, nfserr);