author     Andy Adamson <andros@netapp.com>            2009-08-28 08:45:04 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>    2009-09-01 22:24:06 -0400
commit     557ce2646e775f6bda734dd92b10d4780874b9c7 (patch)
tree       a44a5e2f9d601a039f7ac9b30b5c4af95796fc23
parent     bdac86e2154cfe47552639113265d1fa27cfbe72 (diff)
nfsd41: replace page based DRC with buffer based DRC
Use NFSD_SLOT_CACHE_SIZE-sized buffers for the sessions DRC instead of holding nfsd
pages in the cache.
Connectathon testing has shown that 1024 bytes is sufficient for the encoded compound
operation responses past the sequence operation, while 512 bytes is a little too
small. Set NFSD_SLOT_CACHE_SIZE to 1024.
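
For scale, a short arithmetic sketch of what the smaller buffer does to the per-session cap (NFSD_MAX_MEM_PER_SESSION is NFSD_CACHE_SIZE_SLOTS_PER_SESSION, i.e. 32, times the slot cache size; the 4 KiB page size for the old PAGE_SIZE-based value is an assumption):

```c
/* Back-of-the-envelope DRC sizing; 4 KiB PAGE_SIZE is assumed for the old scheme. */
#include <stdio.h>

#define OLD_SLOT_CACHE_SIZE            4096 /* was PAGE_SIZE */
#define NEW_SLOT_CACHE_SIZE            1024 /* NFSD_SLOT_CACHE_SIZE after this patch */
#define CACHE_SIZE_SLOTS_PER_SESSION     32 /* NFSD_CACHE_SIZE_SLOTS_PER_SESSION */

int main(void)
{
	printf("old max DRC memory per session: %d bytes\n",
	       CACHE_SIZE_SLOTS_PER_SESSION * OLD_SLOT_CACHE_SIZE);
	printf("new max DRC memory per session: %d bytes\n",
	       CACHE_SIZE_SLOTS_PER_SESSION * NEW_SLOT_CACHE_SIZE);
	return 0;
}
```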
Allocate memory for the session DRC in the CREATE_SESSION operation
to guarantee that the memory resource is available for caching responses.
Allocate each slot individually in preparation for slot table size negotiation.
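
A minimal userspace sketch of that allocation pattern (the struct and function names below are simplified stand-ins for nfsd4_session, nfsd4_slot and alloc_init_session, not the kernel code itself): the session carries an array of slot pointers, and each slot is allocated together with its data cache through a flexible array member.

```c
/* Simplified model of per-slot allocation (userspace sketch, not kernel code). */
#include <stdlib.h>

struct slot {
	unsigned int datalen;
	char data[];              /* per-slot reply cache, cachesize bytes */
};

struct session {
	int maxreqs;
	struct slot *slots[];     /* one pointer per slot */
};

static struct session *alloc_session(int maxreqs, size_t cachesize)
{
	struct session *ses;
	int i;

	/* session and the slot-pointer table in one piece */
	ses = calloc(1, sizeof(*ses) + maxreqs * sizeof(struct slot *));
	if (!ses)
		return NULL;
	ses->maxreqs = maxreqs;

	/* each slot and its data cache in one piece */
	for (i = 0; i < maxreqs; i++) {
		ses->slots[i] = calloc(1, sizeof(struct slot) + cachesize);
		if (!ses->slots[i])
			goto out_free;
	}
	return ses;

out_free:
	while (--i >= 0)
		free(ses->slots[i]);
	free(ses);
	return NULL;
}
```

In the patch itself, the failure path is free_session_slots() followed by kfree() of the session, and free_session() now calls free_session_slots() before freeing the session.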
Remove struct nfsd4_cache_entry and helper functions for the old page-based
DRC.
The iov_len calculation in nfs4svc_encode_compoundres is now always
correct. Replay is now done in nfsd4_sequence under the state lock, so
the session ref count is only bumped on non-replay. Clean up the
nfs4svc_encode_compoundres session logic.
The nfsd4_compound_state statp pointer is also no longer used, so remove
nfsd4_set_statp().
Move useful nfsd4_cache_entry fields into nfsd4_slot.
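
The resulting cache path is plain buffer copies: the encoded bytes past the SEQUENCE operation are copied into the slot's buffer on the way out and copied back in on replay. A reduced userspace model of that store/replay pair follows (the kernel versions operate on the xdr_buf via read_bytes_from_xdr_buf() and resp->cstate.datap; the names here are illustrative):

```c
/* Simplified model of slot-based store/replay (userspace sketch, not kernel code). */
#include <string.h>

struct drc_slot {
	int opcnt;                /* operations in the cached reply */
	int status;               /* overall compound status */
	unsigned int datalen;     /* bytes cached past the SEQUENCE op */
	char data[1024];          /* stands in for the NFSD_SLOT_CACHE_SIZE buffer */
};

/* Cache the encoded reply data that follows the SEQUENCE operation. */
static int store_entry(struct drc_slot *slot, const char *enc_past_seq,
		       unsigned int len, int opcnt, int status)
{
	if (len > sizeof(slot->data))
		return -1;        /* the real code bounds this via nfsd4_check_drc_limit() */
	slot->opcnt = opcnt;
	slot->status = status;
	slot->datalen = len;
	memcpy(slot->data, enc_past_seq, len);
	return 0;
}

/* On replay, copy the cached bytes back in just past the re-encoded SEQUENCE. */
static int replay_entry(const struct drc_slot *slot, char *enc_past_seq,
			int *opcnt)
{
	memcpy(enc_past_seq, slot->data, slot->datalen);
	*opcnt = slot->opcnt;
	return slot->status;
}
```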
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
-rw-r--r--  fs/nfsd/nfs4state.c         211
-rw-r--r--  fs/nfsd/nfs4xdr.c            17
-rw-r--r--  fs/nfsd/nfssvc.c              4
-rw-r--r--  include/linux/nfsd/state.h   27
-rw-r--r--  include/linux/nfsd/xdr4.h     5

5 files changed, 79 insertions, 185 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c9a45f49019d..46e9ac526872 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -514,12 +514,23 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, | |||
514 | return status; | 514 | return status; |
515 | } | 515 | } |
516 | 516 | ||
517 | static void | ||
518 | free_session_slots(struct nfsd4_session *ses) | ||
519 | { | ||
520 | int i; | ||
521 | |||
522 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) | ||
523 | kfree(ses->se_slots[i]); | ||
524 | } | ||
525 | |||
517 | static int | 526 | static int |
518 | alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, | 527 | alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, |
519 | struct nfsd4_create_session *cses) | 528 | struct nfsd4_create_session *cses) |
520 | { | 529 | { |
521 | struct nfsd4_session *new, tmp; | 530 | struct nfsd4_session *new, tmp; |
522 | int idx, status = nfserr_serverfault, slotsize; | 531 | struct nfsd4_slot *sp; |
532 | int idx, slotsize, cachesize, i; | ||
533 | int status; | ||
523 | 534 | ||
524 | memset(&tmp, 0, sizeof(tmp)); | 535 | memset(&tmp, 0, sizeof(tmp)); |
525 | 536 | ||
@@ -530,14 +541,27 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, | |||
530 | if (status) | 541 | if (status) |
531 | goto out; | 542 | goto out; |
532 | 543 | ||
533 | /* allocate struct nfsd4_session and slot table in one piece */ | 544 | BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot) |
534 | slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot); | 545 | + sizeof(struct nfsd4_session) > PAGE_SIZE); |
546 | |||
547 | status = nfserr_serverfault; | ||
548 | /* allocate struct nfsd4_session and slot table pointers in one piece */ | ||
549 | slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *); | ||
535 | new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); | 550 | new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); |
536 | if (!new) | 551 | if (!new) |
537 | goto out; | 552 | goto out; |
538 | 553 | ||
539 | memcpy(new, &tmp, sizeof(*new)); | 554 | memcpy(new, &tmp, sizeof(*new)); |
540 | 555 | ||
556 | /* allocate each struct nfsd4_slot and data cache in one piece */ | ||
557 | cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; | ||
558 | for (i = 0; i < new->se_fchannel.maxreqs; i++) { | ||
559 | sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); | ||
560 | if (!sp) | ||
561 | goto out_free; | ||
562 | new->se_slots[i] = sp; | ||
563 | } | ||
564 | |||
541 | new->se_client = clp; | 565 | new->se_client = clp; |
542 | gen_sessionid(new); | 566 | gen_sessionid(new); |
543 | idx = hash_sessionid(&new->se_sessionid); | 567 | idx = hash_sessionid(&new->se_sessionid); |
@@ -554,6 +578,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, | |||
554 | status = nfs_ok; | 578 | status = nfs_ok; |
555 | out: | 579 | out: |
556 | return status; | 580 | return status; |
581 | out_free: | ||
582 | free_session_slots(new); | ||
583 | kfree(new); | ||
584 | goto out; | ||
557 | } | 585 | } |
558 | 586 | ||
559 | /* caller must hold sessionid_lock */ | 587 | /* caller must hold sessionid_lock */ |
@@ -596,22 +624,16 @@ release_session(struct nfsd4_session *ses) | |||
596 | nfsd4_put_session(ses); | 624 | nfsd4_put_session(ses); |
597 | } | 625 | } |
598 | 626 | ||
599 | static void nfsd4_release_respages(struct page **respages, short resused); | ||
600 | |||
601 | void | 627 | void |
602 | free_session(struct kref *kref) | 628 | free_session(struct kref *kref) |
603 | { | 629 | { |
604 | struct nfsd4_session *ses; | 630 | struct nfsd4_session *ses; |
605 | int i; | ||
606 | 631 | ||
607 | ses = container_of(kref, struct nfsd4_session, se_ref); | 632 | ses = container_of(kref, struct nfsd4_session, se_ref); |
608 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) { | ||
609 | struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry; | ||
610 | nfsd4_release_respages(e->ce_respages, e->ce_resused); | ||
611 | } | ||
612 | spin_lock(&nfsd_drc_lock); | 633 | spin_lock(&nfsd_drc_lock); |
613 | nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; | 634 | nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; |
614 | spin_unlock(&nfsd_drc_lock); | 635 | spin_unlock(&nfsd_drc_lock); |
636 | free_session_slots(ses); | ||
615 | kfree(ses); | 637 | kfree(ses); |
616 | } | 638 | } |
617 | 639 | ||
@@ -968,116 +990,31 @@ out_err: | |||
968 | return; | 990 | return; |
969 | } | 991 | } |
970 | 992 | ||
971 | void | ||
972 | nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) | ||
973 | { | ||
974 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | ||
975 | |||
976 | resp->cstate.statp = statp; | ||
977 | } | ||
978 | |||
979 | /* | ||
980 | * Dereference the result pages. | ||
981 | */ | ||
982 | static void | ||
983 | nfsd4_release_respages(struct page **respages, short resused) | ||
984 | { | ||
985 | int i; | ||
986 | |||
987 | dprintk("--> %s\n", __func__); | ||
988 | for (i = 0; i < resused; i++) { | ||
989 | if (!respages[i]) | ||
990 | continue; | ||
991 | put_page(respages[i]); | ||
992 | respages[i] = NULL; | ||
993 | } | ||
994 | } | ||
995 | |||
996 | static void | ||
997 | nfsd4_copy_pages(struct page **topages, struct page **frompages, short count) | ||
998 | { | ||
999 | int i; | ||
1000 | |||
1001 | for (i = 0; i < count; i++) { | ||
1002 | topages[i] = frompages[i]; | ||
1003 | if (!topages[i]) | ||
1004 | continue; | ||
1005 | get_page(topages[i]); | ||
1006 | } | ||
1007 | } | ||
1008 | |||
1009 | /* | 993 | /* |
1010 | * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous | 994 | * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. |
1011 | * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total | ||
1012 | * length of the XDR response is less than se_fmaxresp_cached | ||
1013 | * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a | ||
1014 | * of the reply (e.g. readdir). | ||
1015 | * | ||
1016 | * Store the base and length of the rq_req.head[0] page | ||
1017 | * of the NFSv4.1 data, just past the rpc header. | ||
1018 | */ | 995 | */ |
1019 | void | 996 | void |
1020 | nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) | 997 | nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) |
1021 | { | 998 | { |
1022 | struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; | 999 | struct nfsd4_slot *slot = resp->cstate.slot; |
1023 | struct svc_rqst *rqstp = resp->rqstp; | 1000 | unsigned int base; |
1024 | struct kvec *resv = &rqstp->rq_res.head[0]; | ||
1025 | |||
1026 | dprintk("--> %s entry %p\n", __func__, entry); | ||
1027 | 1001 | ||
1028 | nfsd4_release_respages(entry->ce_respages, entry->ce_resused); | 1002 | dprintk("--> %s slot %p\n", __func__, slot); |
1029 | entry->ce_opcnt = resp->opcnt; | ||
1030 | entry->ce_status = resp->cstate.status; | ||
1031 | 1003 | ||
1032 | /* | 1004 | slot->sl_opcnt = resp->opcnt; |
1033 | * Don't need a page to cache just the sequence operation - the slot | 1005 | slot->sl_status = resp->cstate.status; |
1034 | * does this for us! | ||
1035 | */ | ||
1036 | 1006 | ||
1037 | if (nfsd4_not_cached(resp)) { | 1007 | if (nfsd4_not_cached(resp)) { |
1038 | entry->ce_resused = 0; | 1008 | slot->sl_datalen = 0; |
1039 | entry->ce_rpchdrlen = 0; | ||
1040 | dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__, | ||
1041 | resp->cstate.slot->sl_cache_entry.ce_cachethis); | ||
1042 | return; | 1009 | return; |
1043 | } | 1010 | } |
1044 | entry->ce_resused = rqstp->rq_resused; | 1011 | slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; |
1045 | if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1) | 1012 | base = (char *)resp->cstate.datap - |
1046 | entry->ce_resused = NFSD_PAGES_PER_SLOT + 1; | 1013 | (char *)resp->xbuf->head[0].iov_base; |
1047 | nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages, | 1014 | if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, |
1048 | entry->ce_resused); | 1015 | slot->sl_datalen)) |
1049 | entry->ce_datav.iov_base = resp->cstate.statp; | 1016 | WARN("%s: sessions DRC could not cache compound\n", __func__); |
1050 | entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp - | 1017 | return; |
1051 | (char *)page_address(rqstp->rq_respages[0])); | ||
1052 | /* Current request rpc header length*/ | ||
1053 | entry->ce_rpchdrlen = (char *)resp->cstate.statp - | ||
1054 | (char *)page_address(rqstp->rq_respages[0]); | ||
1055 | } | ||
1056 | |||
1057 | /* | ||
1058 | * We keep the rpc header, but take the nfs reply from the replycache. | ||
1059 | */ | ||
1060 | static int | ||
1061 | nfsd41_copy_replay_data(struct nfsd4_compoundres *resp, | ||
1062 | struct nfsd4_cache_entry *entry) | ||
1063 | { | ||
1064 | struct svc_rqst *rqstp = resp->rqstp; | ||
1065 | struct kvec *resv = &resp->rqstp->rq_res.head[0]; | ||
1066 | int len; | ||
1067 | |||
1068 | /* Current request rpc header length*/ | ||
1069 | len = (char *)resp->cstate.statp - | ||
1070 | (char *)page_address(rqstp->rq_respages[0]); | ||
1071 | if (entry->ce_datav.iov_len + len > PAGE_SIZE) { | ||
1072 | dprintk("%s v41 cached reply too large (%Zd).\n", __func__, | ||
1073 | entry->ce_datav.iov_len); | ||
1074 | return 0; | ||
1075 | } | ||
1076 | /* copy the cached reply nfsd data past the current rpc header */ | ||
1077 | memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base, | ||
1078 | entry->ce_datav.iov_len); | ||
1079 | resv->iov_len = len + entry->ce_datav.iov_len; | ||
1080 | return 1; | ||
1081 | } | 1018 | } |
1082 | 1019 | ||
1083 | /* | 1020 | /* |
@@ -1095,14 +1032,14 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, | |||
1095 | struct nfsd4_slot *slot = resp->cstate.slot; | 1032 | struct nfsd4_slot *slot = resp->cstate.slot; |
1096 | 1033 | ||
1097 | dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__, | 1034 | dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__, |
1098 | resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis); | 1035 | resp->opcnt, resp->cstate.slot->sl_cachethis); |
1099 | 1036 | ||
1100 | /* Encode the replayed sequence operation */ | 1037 | /* Encode the replayed sequence operation */ |
1101 | op = &args->ops[resp->opcnt - 1]; | 1038 | op = &args->ops[resp->opcnt - 1]; |
1102 | nfsd4_encode_operation(resp, op); | 1039 | nfsd4_encode_operation(resp, op); |
1103 | 1040 | ||
1104 | /* Return nfserr_retry_uncached_rep in next operation. */ | 1041 | /* Return nfserr_retry_uncached_rep in next operation. */ |
1105 | if (args->opcnt > 1 && slot->sl_cache_entry.ce_cachethis == 0) { | 1042 | if (args->opcnt > 1 && slot->sl_cachethis == 0) { |
1106 | op = &args->ops[resp->opcnt++]; | 1043 | op = &args->ops[resp->opcnt++]; |
1107 | op->status = nfserr_retry_uncached_rep; | 1044 | op->status = nfserr_retry_uncached_rep; |
1108 | nfsd4_encode_operation(resp, op); | 1045 | nfsd4_encode_operation(resp, op); |
@@ -1111,57 +1048,29 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, | |||
1111 | } | 1048 | } |
1112 | 1049 | ||
1113 | /* | 1050 | /* |
1114 | * Keep the first page of the replay. Copy the NFSv4.1 data from the first | 1051 | * The sequence operation is not cached because we can use the slot and |
1115 | * cached page. Replace any futher replay pages from the cache. | 1052 | * session values. |
1116 | */ | 1053 | */ |
1117 | __be32 | 1054 | __be32 |
1118 | nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, | 1055 | nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, |
1119 | struct nfsd4_sequence *seq) | 1056 | struct nfsd4_sequence *seq) |
1120 | { | 1057 | { |
1121 | struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; | 1058 | struct nfsd4_slot *slot = resp->cstate.slot; |
1122 | __be32 status; | 1059 | __be32 status; |
1123 | 1060 | ||
1124 | dprintk("--> %s entry %p\n", __func__, entry); | 1061 | dprintk("--> %s slot %p\n", __func__, slot); |
1125 | |||
1126 | /* | ||
1127 | * If this is just the sequence operation, we did not keep | ||
1128 | * a page in the cache entry because we can just use the | ||
1129 | * slot info stored in struct nfsd4_sequence that was checked | ||
1130 | * against the slot in nfsd4_sequence(). | ||
1131 | * | ||
1132 | * This occurs when seq->cachethis is FALSE, or when the client | ||
1133 | * session inactivity timer fires and a solo sequence operation | ||
1134 | * is sent (lease renewal). | ||
1135 | */ | ||
1136 | 1062 | ||
1137 | /* Either returns 0 or nfserr_retry_uncached */ | 1063 | /* Either returns 0 or nfserr_retry_uncached */ |
1138 | status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); | 1064 | status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); |
1139 | if (status == nfserr_retry_uncached_rep) | 1065 | if (status == nfserr_retry_uncached_rep) |
1140 | return status; | 1066 | return status; |
1141 | 1067 | ||
1142 | if (!nfsd41_copy_replay_data(resp, entry)) { | 1068 | /* The sequence operation has been encoded, cstate->datap set. */ |
1143 | /* | 1069 | memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen); |
1144 | * Not enough room to use the replay rpc header, send the | ||
1145 | * cached header. Release all the allocated result pages. | ||
1146 | */ | ||
1147 | svc_free_res_pages(resp->rqstp); | ||
1148 | nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages, | ||
1149 | entry->ce_resused); | ||
1150 | } else { | ||
1151 | /* Release all but the first allocated result page */ | ||
1152 | |||
1153 | resp->rqstp->rq_resused--; | ||
1154 | svc_free_res_pages(resp->rqstp); | ||
1155 | |||
1156 | nfsd4_copy_pages(&resp->rqstp->rq_respages[1], | ||
1157 | &entry->ce_respages[1], | ||
1158 | entry->ce_resused - 1); | ||
1159 | } | ||
1160 | 1070 | ||
1161 | resp->rqstp->rq_resused = entry->ce_resused; | 1071 | resp->opcnt = slot->sl_opcnt; |
1162 | resp->opcnt = entry->ce_opcnt; | 1072 | resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen); |
1163 | resp->cstate.iovlen = entry->ce_datav.iov_len + entry->ce_rpchdrlen; | 1073 | status = slot->sl_status; |
1164 | status = entry->ce_status; | ||
1165 | 1074 | ||
1166 | return status; | 1075 | return status; |
1167 | } | 1076 | } |
@@ -1493,7 +1402,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1493 | if (seq->slotid >= session->se_fchannel.maxreqs) | 1402 | if (seq->slotid >= session->se_fchannel.maxreqs) |
1494 | goto out; | 1403 | goto out; |
1495 | 1404 | ||
1496 | slot = &session->se_slots[seq->slotid]; | 1405 | slot = session->se_slots[seq->slotid]; |
1497 | dprintk("%s: slotid %d\n", __func__, seq->slotid); | 1406 | dprintk("%s: slotid %d\n", __func__, seq->slotid); |
1498 | 1407 | ||
1499 | /* We do not negotiate the number of slots yet, so set the | 1408 | /* We do not negotiate the number of slots yet, so set the |
@@ -1506,7 +1415,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1506 | cstate->slot = slot; | 1415 | cstate->slot = slot; |
1507 | cstate->session = session; | 1416 | cstate->session = session; |
1508 | /* Return the cached reply status and set cstate->status | 1417 | /* Return the cached reply status and set cstate->status |
1509 | * for nfsd4_svc_encode_compoundres processing */ | 1418 | * for nfsd4_proc_compound processing */ |
1510 | status = nfsd4_replay_cache_entry(resp, seq); | 1419 | status = nfsd4_replay_cache_entry(resp, seq); |
1511 | cstate->status = nfserr_replay_cache; | 1420 | cstate->status = nfserr_replay_cache; |
1512 | goto out; | 1421 | goto out; |
@@ -1517,7 +1426,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1517 | /* Success! bump slot seqid */ | 1426 | /* Success! bump slot seqid */ |
1518 | slot->sl_inuse = true; | 1427 | slot->sl_inuse = true; |
1519 | slot->sl_seqid = seq->seqid; | 1428 | slot->sl_seqid = seq->seqid; |
1520 | slot->sl_cache_entry.ce_cachethis = seq->cachethis; | 1429 | slot->sl_cachethis = seq->cachethis; |
1521 | 1430 | ||
1522 | cstate->slot = slot; | 1431 | cstate->slot = slot; |
1523 | cstate->session = session; | 1432 | cstate->session = session; |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 20c5e3db0660..00ed16a18497 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3057,6 +3057,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, | |||
3057 | WRITE32(0); | 3057 | WRITE32(0); |
3058 | 3058 | ||
3059 | ADJUST_ARGS(); | 3059 | ADJUST_ARGS(); |
3060 | resp->cstate.datap = p; /* DRC cache data pointer */ | ||
3060 | return 0; | 3061 | return 0; |
3061 | } | 3062 | } |
3062 | 3063 | ||
@@ -3159,7 +3160,7 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp) | |||
3159 | return status; | 3160 | return status; |
3160 | 3161 | ||
3161 | session = resp->cstate.session; | 3162 | session = resp->cstate.session; |
3162 | if (session == NULL || slot->sl_cache_entry.ce_cachethis == 0) | 3163 | if (session == NULL || slot->sl_cachethis == 0) |
3163 | return status; | 3164 | return status; |
3164 | 3165 | ||
3165 | if (resp->opcnt >= args->opcnt) | 3166 | if (resp->opcnt >= args->opcnt) |
@@ -3284,6 +3285,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo | |||
3284 | /* | 3285 | /* |
3285 | * All that remains is to write the tag and operation count... | 3286 | * All that remains is to write the tag and operation count... |
3286 | */ | 3287 | */ |
3288 | struct nfsd4_compound_state *cs = &resp->cstate; | ||
3287 | struct kvec *iov; | 3289 | struct kvec *iov; |
3288 | p = resp->tagp; | 3290 | p = resp->tagp; |
3289 | *p++ = htonl(resp->taglen); | 3291 | *p++ = htonl(resp->taglen); |
@@ -3297,15 +3299,10 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo | |||
3297 | iov = &rqstp->rq_res.head[0]; | 3299 | iov = &rqstp->rq_res.head[0]; |
3298 | iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base; | 3300 | iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base; |
3299 | BUG_ON(iov->iov_len > PAGE_SIZE); | 3301 | BUG_ON(iov->iov_len > PAGE_SIZE); |
3300 | if (nfsd4_has_session(&resp->cstate)) { | 3302 | if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) { |
3301 | if (resp->cstate.status == nfserr_replay_cache && | 3303 | nfsd4_store_cache_entry(resp); |
3302 | !nfsd4_not_cached(resp)) { | 3304 | dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); |
3303 | iov->iov_len = resp->cstate.iovlen; | 3305 | resp->cstate.slot->sl_inuse = false; |
3304 | } else { | ||
3305 | nfsd4_store_cache_entry(resp); | ||
3306 | dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); | ||
3307 | resp->cstate.slot->sl_inuse = 0; | ||
3308 | } | ||
3309 | nfsd4_put_session(resp->cstate.session); | 3306 | nfsd4_put_session(resp->cstate.session); |
3310 | } | 3307 | } |
3311 | return 1; | 3308 | return 1; |
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 675d395c4ab6..4472449c0937 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -577,10 +577,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) | |||
577 | + rqstp->rq_res.head[0].iov_len; | 577 | + rqstp->rq_res.head[0].iov_len; |
578 | rqstp->rq_res.head[0].iov_len += sizeof(__be32); | 578 | rqstp->rq_res.head[0].iov_len += sizeof(__be32); |
579 | 579 | ||
580 | /* NFSv4.1 DRC requires statp */ | ||
581 | if (rqstp->rq_vers == 4) | ||
582 | nfsd4_set_statp(rqstp, statp); | ||
583 | |||
584 | /* Now call the procedure handler, and encode NFS status. */ | 580 | /* Now call the procedure handler, and encode NFS status. */ |
585 | nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); | 581 | nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); |
586 | nfserr = map_new_errors(rqstp->rq_vers, nfserr); | 582 | nfserr = map_new_errors(rqstp->rq_vers, nfserr); |
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index ff0b771efde6..70ef5f4abbbc 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -94,30 +94,23 @@ struct nfs4_cb_conn { | |||
94 | 94 | ||
95 | /* Maximum number of slots per session. 160 is useful for long haul TCP */ | 95 | /* Maximum number of slots per session. 160 is useful for long haul TCP */ |
96 | #define NFSD_MAX_SLOTS_PER_SESSION 160 | 96 | #define NFSD_MAX_SLOTS_PER_SESSION 160 |
97 | /* Maximum number of pages per slot cache entry */ | ||
98 | #define NFSD_PAGES_PER_SLOT 1 | ||
99 | #define NFSD_SLOT_CACHE_SIZE PAGE_SIZE | ||
100 | /* Maximum number of operations per session compound */ | 97 | /* Maximum number of operations per session compound */ |
101 | #define NFSD_MAX_OPS_PER_COMPOUND 16 | 98 | #define NFSD_MAX_OPS_PER_COMPOUND 16 |
99 | /* Maximum session per slot cache size */ | ||
100 | #define NFSD_SLOT_CACHE_SIZE 1024 | ||
102 | /* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ | 101 | /* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ |
103 | #define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 | 102 | #define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 |
104 | #define NFSD_MAX_MEM_PER_SESSION \ | 103 | #define NFSD_MAX_MEM_PER_SESSION \ |
105 | (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) | 104 | (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) |
106 | 105 | ||
107 | struct nfsd4_cache_entry { | ||
108 | __be32 ce_status; | ||
109 | struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ | ||
110 | struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; | ||
111 | int ce_cachethis; | ||
112 | short ce_resused; | ||
113 | int ce_opcnt; | ||
114 | int ce_rpchdrlen; | ||
115 | }; | ||
116 | |||
117 | struct nfsd4_slot { | 106 | struct nfsd4_slot { |
118 | bool sl_inuse; | 107 | bool sl_inuse; |
119 | u32 sl_seqid; | 108 | bool sl_cachethis; |
120 | struct nfsd4_cache_entry sl_cache_entry; | 109 | u16 sl_opcnt; |
110 | u32 sl_seqid; | ||
111 | __be32 sl_status; | ||
112 | u32 sl_datalen; | ||
113 | char sl_data[]; | ||
121 | }; | 114 | }; |
122 | 115 | ||
123 | struct nfsd4_channel_attrs { | 116 | struct nfsd4_channel_attrs { |
@@ -159,7 +152,7 @@ struct nfsd4_session { | |||
159 | struct nfs4_sessionid se_sessionid; | 152 | struct nfs4_sessionid se_sessionid; |
160 | struct nfsd4_channel_attrs se_fchannel; | 153 | struct nfsd4_channel_attrs se_fchannel; |
161 | struct nfsd4_channel_attrs se_bchannel; | 154 | struct nfsd4_channel_attrs se_bchannel; |
162 | struct nfsd4_slot se_slots[]; /* forward channel slots */ | 155 | struct nfsd4_slot *se_slots[]; /* forward channel slots */ |
163 | }; | 156 | }; |
164 | 157 | ||
165 | static inline void | 158 | static inline void |
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 3f716607c86d..73164c2b3d29 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -51,7 +51,7 @@ struct nfsd4_compound_state { | |||
51 | /* For sessions DRC */ | 51 | /* For sessions DRC */ |
52 | struct nfsd4_session *session; | 52 | struct nfsd4_session *session; |
53 | struct nfsd4_slot *slot; | 53 | struct nfsd4_slot *slot; |
54 | __be32 *statp; | 54 | __be32 *datap; |
55 | size_t iovlen; | 55 | size_t iovlen; |
56 | u32 minorversion; | 56 | u32 minorversion; |
57 | u32 status; | 57 | u32 status; |
@@ -472,8 +472,7 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) | |||
472 | 472 | ||
473 | static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) | 473 | static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) |
474 | { | 474 | { |
475 | return !resp->cstate.slot->sl_cache_entry.ce_cachethis || | 475 | return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); |
476 | nfsd4_is_solo_sequence(resp); | ||
477 | } | 476 | } |
478 | 477 | ||
479 | #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) | 478 | #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) |