path: root/fs/nfsd/nfs4state.c
Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--	fs/nfsd/nfs4state.c	211
1 files changed, 60 insertions(+), 151 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c9a45f49019d..46e9ac526872 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -514,12 +514,23 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
 	return status;
 }
 
+static void
+free_session_slots(struct nfsd4_session *ses)
+{
+	int i;
+
+	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+		kfree(ses->se_slots[i]);
+}
+
 static int
 alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
 		   struct nfsd4_create_session *cses)
 {
 	struct nfsd4_session *new, tmp;
-	int idx, status = nfserr_serverfault, slotsize;
+	struct nfsd4_slot *sp;
+	int idx, slotsize, cachesize, i;
+	int status;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -530,14 +541,27 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
 	if (status)
 		goto out;
 
-	/* allocate struct nfsd4_session and slot table in one piece */
-	slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot);
+	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot)
+		     + sizeof(struct nfsd4_session) > PAGE_SIZE);
+
+	status = nfserr_serverfault;
+	/* allocate struct nfsd4_session and slot table pointers in one piece */
+	slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *);
 	new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
 	if (!new)
 		goto out;
 
 	memcpy(new, &tmp, sizeof(*new));
 
+	/* allocate each struct nfsd4_slot and data cache in one piece */
+	cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+	for (i = 0; i < new->se_fchannel.maxreqs; i++) {
+		sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
+		if (!sp)
+			goto out_free;
+		new->se_slots[i] = sp;
+	}
+
 	new->se_client = clp;
 	gen_sessionid(new);
 	idx = hash_sessionid(&new->se_sessionid);
@@ -554,6 +578,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
 	status = nfs_ok;
 out:
 	return status;
+out_free:
+	free_session_slots(new);
+	kfree(new);
+	goto out;
 }
 
 /* caller must hold sessionid_lock */
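For readers tracking the new allocation scheme above: the session is now allocated together with a table of slot pointers, and each slot is allocated together with its own reply-data cache, both using the "struct plus trailing flexible array in a single kzalloc" idiom. Below is a minimal userspace sketch of that layout, not the kernel code; demo_session, demo_slot and their fields are simplified stand-ins for the real nfsd4_session/nfsd4_slot definitions, which live in the nfsd headers rather than in this file.

#include <stdlib.h>

/* Simplified stand-ins for struct nfsd4_slot / struct nfsd4_session. */
struct demo_slot {
	unsigned int	  datalen;	/* bytes of cached reply data in use */
	char		  data[];	/* reply cache, allocated with the slot */
};

struct demo_session {
	unsigned int	  maxreqs;	/* number of slots */
	struct demo_slot *slots[];	/* pointer table, allocated with the session */
};

static struct demo_session *alloc_demo_session(unsigned int maxreqs, size_t cachesize)
{
	struct demo_session *ses;
	unsigned int i;

	/* session header + slot pointer table in one piece */
	ses = calloc(1, sizeof(*ses) + maxreqs * sizeof(struct demo_slot *));
	if (!ses)
		return NULL;
	ses->maxreqs = maxreqs;

	/* each slot header + its data cache in one piece */
	for (i = 0; i < maxreqs; i++) {
		ses->slots[i] = calloc(1, sizeof(struct demo_slot) + cachesize);
		if (!ses->slots[i])
			goto out_free;
	}
	return ses;

out_free:
	while (i--)
		free(ses->slots[i]);
	free(ses);
	return NULL;
}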
@@ -596,22 +624,16 @@ release_session(struct nfsd4_session *ses)
 	nfsd4_put_session(ses);
 }
 
-static void nfsd4_release_respages(struct page **respages, short resused);
-
 void
 free_session(struct kref *kref)
 {
 	struct nfsd4_session *ses;
-	int i;
 
 	ses = container_of(kref, struct nfsd4_session, se_ref);
-	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
-		struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
-		nfsd4_release_respages(e->ce_respages, e->ce_resused);
-	}
 	spin_lock(&nfsd_drc_lock);
 	nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE;
 	spin_unlock(&nfsd_drc_lock);
+	free_session_slots(ses);
 	kfree(ses);
 }
 
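The free path above mirrors the allocation: the per-slot allocations are released before the session that holds their pointers, and the reservation charged at session-creation time (maxreqs * NFSD_SLOT_CACHE_SIZE) is returned to nfsd_drc_mem_used under nfsd_drc_lock. Continuing the userspace sketch from earlier (the counter below only models that accounting; it is not the kernel's):

/* Continuing the sketch above: a stand-in for the nfsd_drc_mem_used accounting. */
static size_t demo_drc_mem_used;

static void free_demo_session(struct demo_session *ses, size_t slot_cache_size)
{
	unsigned int i;

	/* return the reservation charged when the session was created */
	demo_drc_mem_used -= ses->maxreqs * slot_cache_size;

	/* free each slot first, then the session holding the pointer table */
	for (i = 0; i < ses->maxreqs; i++)
		free(ses->slots[i]);
	free(ses);
}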
@@ -968,116 +990,31 @@ out_err:
 	return;
 }
 
-void
-nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
-	struct nfsd4_compoundres *resp = rqstp->rq_resp;
-
-	resp->cstate.statp = statp;
-}
-
-/*
- * Dereference the result pages.
- */
-static void
-nfsd4_release_respages(struct page **respages, short resused)
-{
-	int i;
-
-	dprintk("--> %s\n", __func__);
-	for (i = 0; i < resused; i++) {
-		if (!respages[i])
-			continue;
-		put_page(respages[i]);
-		respages[i] = NULL;
-	}
-}
-
-static void
-nfsd4_copy_pages(struct page **topages, struct page **frompages, short count)
-{
-	int i;
-
-	for (i = 0; i < count; i++) {
-		topages[i] = frompages[i];
-		if (!topages[i])
-			continue;
-		get_page(topages[i]);
-	}
-}
-
 /*
- * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous
- * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total
- * length of the XDR response is less than se_fmaxresp_cached
- * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a
- * of the reply (e.g. readdir).
- *
- * Store the base and length of the rq_req.head[0] page
- * of the NFSv4.1 data, just past the rpc header.
+ * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
  */
 void
 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
 {
-	struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
-	struct svc_rqst *rqstp = resp->rqstp;
-	struct kvec *resv = &rqstp->rq_res.head[0];
-
-	dprintk("--> %s entry %p\n", __func__, entry);
+	struct nfsd4_slot *slot = resp->cstate.slot;
+	unsigned int base;
 
-	nfsd4_release_respages(entry->ce_respages, entry->ce_resused);
-	entry->ce_opcnt = resp->opcnt;
-	entry->ce_status = resp->cstate.status;
+	dprintk("--> %s slot %p\n", __func__, slot);
 
-	/*
-	 * Don't need a page to cache just the sequence operation - the slot
-	 * does this for us!
-	 */
+	slot->sl_opcnt = resp->opcnt;
+	slot->sl_status = resp->cstate.status;
 
 	if (nfsd4_not_cached(resp)) {
-		entry->ce_resused = 0;
-		entry->ce_rpchdrlen = 0;
-		dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__,
-			resp->cstate.slot->sl_cache_entry.ce_cachethis);
+		slot->sl_datalen = 0;
 		return;
 	}
-	entry->ce_resused = rqstp->rq_resused;
-	if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1)
-		entry->ce_resused = NFSD_PAGES_PER_SLOT + 1;
-	nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages,
-		entry->ce_resused);
-	entry->ce_datav.iov_base = resp->cstate.statp;
-	entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp -
-		(char *)page_address(rqstp->rq_respages[0]));
-	/* Current request rpc header length*/
-	entry->ce_rpchdrlen = (char *)resp->cstate.statp -
-		(char *)page_address(rqstp->rq_respages[0]);
-}
-
-/*
- * We keep the rpc header, but take the nfs reply from the replycache.
- */
-static int
-nfsd41_copy_replay_data(struct nfsd4_compoundres *resp,
-			struct nfsd4_cache_entry *entry)
-{
-	struct svc_rqst *rqstp = resp->rqstp;
-	struct kvec *resv = &resp->rqstp->rq_res.head[0];
-	int len;
-
-	/* Current request rpc header length*/
-	len = (char *)resp->cstate.statp -
-		(char *)page_address(rqstp->rq_respages[0]);
-	if (entry->ce_datav.iov_len + len > PAGE_SIZE) {
-		dprintk("%s v41 cached reply too large (%Zd).\n", __func__,
-			entry->ce_datav.iov_len);
-		return 0;
-	}
-	/* copy the cached reply nfsd data past the current rpc header */
-	memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base,
-		entry->ce_datav.iov_len);
-	resv->iov_len = len + entry->ce_datav.iov_len;
-	return 1;
+	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
+	base = (char *)resp->cstate.datap -
+					(char *)resp->xbuf->head[0].iov_base;
+	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
+				    slot->sl_datalen))
+		WARN("%s: sessions DRC could not cache compound\n", __func__);
+	return;
 }
 
 /*
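The new store path copies the encoded reply bytes instead of pinning response pages: sl_datalen is everything encoded after the SEQUENCE operation (resp->p minus cstate.datap), and base is where that data begins relative to the head of the XDR buffer, which is what lets read_bytes_from_xdr_buf() pull it out even when the reply spills from the head kvec into the page list. A rough model of the offset arithmetic, with made-up addresses (the layout and numbers are illustrative only):

#include <stdio.h>

int main(void)
{
	/* Illustrative positions inside one reply buffer, not real kernel values. */
	unsigned long head_base = 0x1000;	/* resp->xbuf->head[0].iov_base     */
	unsigned long datap     = 0x10a4;	/* cstate.datap: just past SEQUENCE */
	unsigned long p         = 0x1120;	/* resp->p: end of encoded compound */

	unsigned long datalen = p - datap;		/* bytes copied into sl_data */
	unsigned long base    = datap - head_base;	/* offset handed to the copy */

	printf("cache %lu bytes starting %lu bytes into the xdr_buf\n", datalen, base);
	return 0;
}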
@@ -1095,14 +1032,14 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
 	struct nfsd4_slot *slot = resp->cstate.slot;
 
 	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
-		resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis);
+		resp->opcnt, resp->cstate.slot->sl_cachethis);
 
 	/* Encode the replayed sequence operation */
 	op = &args->ops[resp->opcnt - 1];
 	nfsd4_encode_operation(resp, op);
 
 	/* Return nfserr_retry_uncached_rep in next operation. */
-	if (args->opcnt > 1 && slot->sl_cache_entry.ce_cachethis == 0) {
+	if (args->opcnt > 1 && slot->sl_cachethis == 0) {
 		op = &args->ops[resp->opcnt++];
 		op->status = nfserr_retry_uncached_rep;
 		nfsd4_encode_operation(resp, op);
@@ -1111,57 +1048,29 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
 }
 
 /*
- * Keep the first page of the replay. Copy the NFSv4.1 data from the first
- * cached page. Replace any futher replay pages from the cache.
+ * The sequence operation is not cached because we can use the slot and
+ * session values.
  */
 __be32
 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
 			 struct nfsd4_sequence *seq)
 {
-	struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
+	struct nfsd4_slot *slot = resp->cstate.slot;
 	__be32 status;
 
-	dprintk("--> %s entry %p\n", __func__, entry);
-
-	/*
-	 * If this is just the sequence operation, we did not keep
-	 * a page in the cache entry because we can just use the
-	 * slot info stored in struct nfsd4_sequence that was checked
-	 * against the slot in nfsd4_sequence().
-	 *
-	 * This occurs when seq->cachethis is FALSE, or when the client
-	 * session inactivity timer fires and a solo sequence operation
-	 * is sent (lease renewal).
-	 */
+	dprintk("--> %s slot %p\n", __func__, slot);
 
 	/* Either returns 0 or nfserr_retry_uncached */
 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
 	if (status == nfserr_retry_uncached_rep)
 		return status;
 
-	if (!nfsd41_copy_replay_data(resp, entry)) {
-		/*
-		 * Not enough room to use the replay rpc header, send the
-		 * cached header. Release all the allocated result pages.
-		 */
-		svc_free_res_pages(resp->rqstp);
-		nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages,
-			entry->ce_resused);
-	} else {
-		/* Release all but the first allocated result page */
-
-		resp->rqstp->rq_resused--;
-		svc_free_res_pages(resp->rqstp);
-
-		nfsd4_copy_pages(&resp->rqstp->rq_respages[1],
-			&entry->ce_respages[1],
-			entry->ce_resused - 1);
-	}
+	/* The sequence operation has been encoded, cstate->datap set. */
+	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
 
-	resp->rqstp->rq_resused = entry->ce_resused;
-	resp->opcnt = entry->ce_opcnt;
-	resp->cstate.iovlen = entry->ce_datav.iov_len + entry->ce_rpchdrlen;
-	status = entry->ce_status;
+	resp->opcnt = slot->sl_opcnt;
+	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
+	status = slot->sl_status;
 
 	return status;
 }
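On replay, the cached bytes are memcpy'd back in right after the re-encoded SEQUENCE operation, and resp->p (a __be32 pointer) is advanced in 4-byte XDR words. A quick check of that rounding, assuming XDR_QUADLEN is the usual sunrpc bytes-to-quads macro and using a made-up length:

#include <stdio.h>

/* Bytes -> 32-bit XDR units, rounded up (as in the sunrpc XDR_QUADLEN macro). */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int sl_datalen = 122;	/* cached reply length, illustrative */

	printf("advance resp->p by %u quads for %u cached bytes\n",
	       XDR_QUADLEN(sl_datalen), sl_datalen);
	return 0;
}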
@@ -1493,7 +1402,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 	if (seq->slotid >= session->se_fchannel.maxreqs)
 		goto out;
 
-	slot = &session->se_slots[seq->slotid];
+	slot = session->se_slots[seq->slotid];
 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
 
 	/* We do not negotiate the number of slots yet, so set the
@@ -1506,7 +1415,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 		cstate->slot = slot;
 		cstate->session = session;
 		/* Return the cached reply status and set cstate->status
-		 * for nfsd4_svc_encode_compoundres processing */
+		 * for nfsd4_proc_compound processing */
 		status = nfsd4_replay_cache_entry(resp, seq);
 		cstate->status = nfserr_replay_cache;
 		goto out;
@@ -1517,7 +1426,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 	/* Success! bump slot seqid */
 	slot->sl_inuse = true;
 	slot->sl_seqid = seq->seqid;
-	slot->sl_cache_entry.ce_cachethis = seq->cachethis;
+	slot->sl_cachethis = seq->cachethis;
 
 	cstate->slot = slot;
 	cstate->session = session;