diff options
author | Andy Adamson <andros@netapp.com> | 2009-04-03 01:28:15 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@citi.umich.edu> | 2009-04-03 20:41:17 -0400 |
commit | 074fe897536f095309c5aaffcf912952882ab2cb (patch) | |
tree | d4bb2fff645a2df1dbdec4af6787359d888a3695 | |
parent | f9bb94c4c60f6e1d1717077bfddb614f03a607d1 (diff) |
nfsd41: DRC save, restore, and clear functions
Cache all the result pages, including the rpc header in rq_respages[0],
for a request in the slot table cache entry.
Cache the statp pointer from nfsd_dispatch which points into rq_respages[0]
just past the rpc header. When setting a cache entry, calculate and save the
length of the nfs data minus the rpc header for rq_respages[0].
When replaying a cache entry, replace the cached rpc header with the
replayed request rpc result header, unless there is not enough room in the
cached results first page. In that case, use the cached rpc header.
The sessions fore channel maxresponse size cached is set to NFSD_PAGES_PER_SLOT
* PAGE_SIZE. For compounds we are caching with operations such as READDIR
that use the xdr_buf->pages to hold data, we choose to cache the extra page of
data rather than copying data from xdr_buf->pages into the xdr_buf->head page.
[nfsd41: limit cache to maxresponsesize_cached]
[nfsd41: mv nfsd4_set_statp under CONFIG_NFSD_V4_1]
[nfsd41: rename nfsd4_move_pages]
[nfsd41: rename page_no variable]
[nfsd41: rename nfsd4_set_cache_entry]
[nfsd41: fix nfsd41_copy_replay_data comment]
[nfsd41: add to nfsd4_set_cache_entry]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
-rw-r--r-- | fs/nfsd/nfs4state.c | 142 | ||||
-rw-r--r-- | fs/nfsd/nfssvc.c | 4 | ||||
-rw-r--r-- | include/linux/nfsd/cache.h | 1 | ||||
-rw-r--r-- | include/linux/nfsd/state.h | 13 | ||||
-rw-r--r-- | include/linux/nfsd/xdr4.h | 4 |
5 files changed, 164 insertions, 0 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 9243dca3576c..a37b91dab1bf 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -852,6 +852,148 @@ out_err: | |||
852 | return; | 852 | return; |
853 | } | 853 | } |
854 | 854 | ||
855 | void | ||
856 | nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) | ||
857 | { | ||
858 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | ||
859 | |||
860 | resp->cstate.statp = statp; | ||
861 | } | ||
862 | |||
863 | /* | ||
864 | * Dereference the result pages. | ||
865 | */ | ||
866 | static void | ||
867 | nfsd4_release_respages(struct page **respages, short resused) | ||
868 | { | ||
869 | int i; | ||
870 | |||
871 | dprintk("--> %s\n", __func__); | ||
872 | for (i = 0; i < resused; i++) { | ||
873 | if (!respages[i]) | ||
874 | continue; | ||
875 | put_page(respages[i]); | ||
876 | respages[i] = NULL; | ||
877 | } | ||
878 | } | ||
879 | |||
880 | static void | ||
881 | nfsd4_copy_pages(struct page **topages, struct page **frompages, short count) | ||
882 | { | ||
883 | int i; | ||
884 | |||
885 | for (i = 0; i < count; i++) { | ||
886 | topages[i] = frompages[i]; | ||
887 | if (!topages[i]) | ||
888 | continue; | ||
889 | get_page(topages[i]); | ||
890 | } | ||
891 | } | ||
892 | |||
893 | /* | ||
894 | * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous | ||
895 | * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total | ||
896 | * length of the XDR response is less than se_fmaxresp_cached | ||
897 | * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a | ||
898 | * portion of the reply (e.g. readdir). | ||
899 | * | ||
900 | * Store the base and length of the rq_req.head[0] page | ||
901 | * of the NFSv4.1 data, just past the rpc header. | ||
902 | */ | ||
903 | void | ||
904 | nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) | ||
905 | { | ||
906 | struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; | ||
907 | struct svc_rqst *rqstp = resp->rqstp; | ||
908 | struct nfsd4_compoundargs *args = rqstp->rq_argp; | ||
909 | struct nfsd4_op *op = &args->ops[resp->opcnt]; | ||
910 | struct kvec *resv = &rqstp->rq_res.head[0]; | ||
911 | |||
912 | dprintk("--> %s entry %p\n", __func__, entry); | ||
913 | |||
914 | /* Don't cache a failed OP_SEQUENCE. */ | ||
915 | if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status) | ||
916 | return; | ||
917 | nfsd4_release_respages(entry->ce_respages, entry->ce_resused); | ||
918 | entry->ce_resused = rqstp->rq_resused; | ||
919 | if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1) | ||
920 | entry->ce_resused = NFSD_PAGES_PER_SLOT + 1; | ||
921 | nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages, | ||
922 | entry->ce_resused); | ||
923 | entry->ce_status = resp->cstate.status; | ||
924 | entry->ce_datav.iov_base = resp->cstate.statp; | ||
925 | entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp - | ||
926 | (char *)page_address(rqstp->rq_respages[0])); | ||
927 | entry->ce_opcnt = resp->opcnt; | ||
928 | /* Current request rpc header length*/ | ||
929 | entry->ce_rpchdrlen = (char *)resp->cstate.statp - | ||
930 | (char *)page_address(rqstp->rq_respages[0]); | ||
931 | } | ||
932 | |||
933 | /* | ||
934 | * We keep the rpc header, but take the nfs reply from the replycache. | ||
935 | */ | ||
936 | static int | ||
937 | nfsd41_copy_replay_data(struct nfsd4_compoundres *resp, | ||
938 | struct nfsd4_cache_entry *entry) | ||
939 | { | ||
940 | struct svc_rqst *rqstp = resp->rqstp; | ||
941 | struct kvec *resv = &resp->rqstp->rq_res.head[0]; | ||
942 | int len; | ||
943 | |||
944 | /* Current request rpc header length*/ | ||
945 | len = (char *)resp->cstate.statp - | ||
946 | (char *)page_address(rqstp->rq_respages[0]); | ||
947 | if (entry->ce_datav.iov_len + len > PAGE_SIZE) { | ||
948 | dprintk("%s v41 cached reply too large (%Zd).\n", __func__, | ||
949 | entry->ce_datav.iov_len); | ||
950 | return 0; | ||
951 | } | ||
952 | /* copy the cached reply nfsd data past the current rpc header */ | ||
953 | memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base, | ||
954 | entry->ce_datav.iov_len); | ||
955 | resv->iov_len = len + entry->ce_datav.iov_len; | ||
956 | return 1; | ||
957 | } | ||
958 | |||
959 | /* | ||
960 | * Keep the first page of the replay. Copy the NFSv4.1 data from the first | ||
961 | * cached page. Replace any further replay pages from the cache. | ||
962 | */ | ||
963 | __be32 | ||
964 | nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp) | ||
965 | { | ||
966 | struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; | ||
967 | __be32 status; | ||
968 | |||
969 | dprintk("--> %s entry %p\n", __func__, entry); | ||
970 | |||
971 | |||
972 | if (!nfsd41_copy_replay_data(resp, entry)) { | ||
973 | /* | ||
974 | * Not enough room to use the replay rpc header, send the | ||
975 | * cached header. Release all the allocated result pages. | ||
976 | */ | ||
977 | svc_free_res_pages(resp->rqstp); | ||
978 | nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages, | ||
979 | entry->ce_resused); | ||
980 | } else { | ||
981 | /* Release all but the first allocated result page */ | ||
982 | |||
983 | resp->rqstp->rq_resused--; | ||
984 | svc_free_res_pages(resp->rqstp); | ||
985 | |||
986 | nfsd4_copy_pages(&resp->rqstp->rq_respages[1], | ||
987 | &entry->ce_respages[1], | ||
988 | entry->ce_resused - 1); | ||
989 | } | ||
990 | |||
991 | resp->rqstp->rq_resused = entry->ce_resused; | ||
992 | status = entry->ce_status; | ||
993 | |||
994 | return status; | ||
995 | } | ||
996 | |||
855 | /* | 997 | /* |
856 | * Set the exchange_id flags returned by the server. | 998 | * Set the exchange_id flags returned by the server. |
857 | */ | 999 | */ |
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index ef0a3686639d..b5168d1898ec 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c | |||
@@ -515,6 +515,10 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) | |||
515 | + rqstp->rq_res.head[0].iov_len; | 515 | + rqstp->rq_res.head[0].iov_len; |
516 | rqstp->rq_res.head[0].iov_len += sizeof(__be32); | 516 | rqstp->rq_res.head[0].iov_len += sizeof(__be32); |
517 | 517 | ||
518 | /* NFSv4.1 DRC requires statp */ | ||
519 | if (rqstp->rq_vers == 4) | ||
520 | nfsd4_set_statp(rqstp, statp); | ||
521 | |||
518 | /* Now call the procedure handler, and encode NFS status. */ | 522 | /* Now call the procedure handler, and encode NFS status. */ |
519 | nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); | 523 | nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); |
520 | nfserr = map_new_errors(rqstp->rq_vers, nfserr); | 524 | nfserr = map_new_errors(rqstp->rq_vers, nfserr); |
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h index 04b355c801d8..a59a2df6d079 100644 --- a/include/linux/nfsd/cache.h +++ b/include/linux/nfsd/cache.h | |||
@@ -75,5 +75,6 @@ int nfsd_reply_cache_init(void); | |||
75 | void nfsd_reply_cache_shutdown(void); | 75 | void nfsd_reply_cache_shutdown(void); |
76 | int nfsd_cache_lookup(struct svc_rqst *, int); | 76 | int nfsd_cache_lookup(struct svc_rqst *, int); |
77 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); | 77 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); |
78 | void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); | ||
78 | 79 | ||
79 | #endif /* NFSCACHE_H */ | 80 | #endif /* NFSCACHE_H */ |
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 90829db76861..f1edb1d98523 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h | |||
@@ -99,9 +99,22 @@ struct nfs4_callback { | |||
99 | struct rpc_clnt * cb_client; | 99 | struct rpc_clnt * cb_client; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | /* Maximum number of pages per slot cache entry */ | ||
103 | #define NFSD_PAGES_PER_SLOT 1 | ||
104 | |||
105 | struct nfsd4_cache_entry { | ||
106 | __be32 ce_status; | ||
107 | struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ | ||
108 | struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; | ||
109 | short ce_resused; | ||
110 | int ce_opcnt; | ||
111 | int ce_rpchdrlen; | ||
112 | }; | ||
113 | |||
102 | struct nfsd4_slot { | 114 | struct nfsd4_slot { |
103 | bool sl_inuse; | 115 | bool sl_inuse; |
104 | u32 sl_seqid; | 116 | u32 sl_seqid; |
117 | struct nfsd4_cache_entry sl_cache_entry; | ||
105 | }; | 118 | }; |
106 | 119 | ||
107 | struct nfsd4_session { | 120 | struct nfsd4_session { |
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 6e28a041008d..d091684325af 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h | |||
@@ -51,6 +51,8 @@ struct nfsd4_compound_state { | |||
51 | /* For sessions DRC */ | 51 | /* For sessions DRC */ |
52 | struct nfsd4_session *session; | 52 | struct nfsd4_session *session; |
53 | struct nfsd4_slot *slot; | 53 | struct nfsd4_slot *slot; |
54 | __be32 *statp; | ||
55 | u32 status; | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | struct nfsd4_change_info { | 58 | struct nfsd4_change_info { |
@@ -487,6 +489,8 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, | |||
487 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | 489 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, |
488 | struct nfsd4_compound_state *, | 490 | struct nfsd4_compound_state *, |
489 | struct nfsd4_setclientid_confirm *setclientid_confirm); | 491 | struct nfsd4_setclientid_confirm *setclientid_confirm); |
492 | extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); | ||
493 | extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp); | ||
490 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, | 494 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, |
491 | struct nfsd4_compound_state *, | 495 | struct nfsd4_compound_state *, |
492 | struct nfsd4_exchange_id *); | 496 | struct nfsd4_exchange_id *); |