Diffstat (limited to 'fs/nfsd')
-rw-r--r--  fs/nfsd/nfs4proc.c    82
-rw-r--r--  fs/nfsd/nfs4state.c  164
-rw-r--r--  fs/nfsd/nfs4xdr.c      3
-rw-r--r--  fs/nfsd/nfsctl.c       5
-rw-r--r--  fs/nfsd/nfssvc.c      23
5 files changed, 165 insertions, 112 deletions
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 7c8801769a3c..6fde431df9ee 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -123,6 +123,35 @@ nfsd4_check_open_attributes(struct svc_rqst *rqstp,
 	return status;
 }
 
+static int
+is_create_with_attrs(struct nfsd4_open *open)
+{
+	return open->op_create == NFS4_OPEN_CREATE
+		&& (open->op_createmode == NFS4_CREATE_UNCHECKED
+		    || open->op_createmode == NFS4_CREATE_GUARDED
+		    || open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
+}
+
+/*
+ * if error occurs when setting the acl, just clear the acl bit
+ * in the returned attr bitmap.
+ */
+static void
+do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+		struct nfs4_acl *acl, u32 *bmval)
+{
+	__be32 status;
+
+	status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
+	if (status)
+		/*
+		 * We should probably fail the whole open at this point,
+		 * but we've already created the file, so it's too late;
+		 * So this seems the least of evils:
+		 */
+		bmval[0] &= ~FATTR4_WORD0_ACL;
+}
+
 static inline void
 fh_dup2(struct svc_fh *dst, struct svc_fh *src)
 {
@@ -206,6 +235,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
 	if (status)
 		goto out;
 
+	if (is_create_with_attrs(open) && open->op_acl != NULL)
+		do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
+
 	set_change_info(&open->op_cinfo, current_fh);
 	fh_dup2(current_fh, &resfh);
 
@@ -536,12 +568,17 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		status = nfserr_badtype;
 	}
 
-	if (!status) {
-		fh_unlock(&cstate->current_fh);
-		set_change_info(&create->cr_cinfo, &cstate->current_fh);
-		fh_dup2(&cstate->current_fh, &resfh);
-	}
+	if (status)
+		goto out;
 
+	if (create->cr_acl != NULL)
+		do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
+				create->cr_bmval);
+
+	fh_unlock(&cstate->current_fh);
+	set_change_info(&create->cr_cinfo, &cstate->current_fh);
+	fh_dup2(&cstate->current_fh, &resfh);
+out:
 	fh_put(&resfh);
 	return status;
 }
@@ -947,34 +984,6 @@ static struct nfsd4_operation nfsd4_ops[];
 static const char *nfsd4_op_name(unsigned opnum);
 
 /*
- * This is a replay of a compound for which no cache entry pages
- * were used. Encode the sequence operation, and if cachethis is FALSE
- * encode the uncache rep error on the next operation.
- */
-static __be32
-nfsd4_enc_uncached_replay(struct nfsd4_compoundargs *args,
-			  struct nfsd4_compoundres *resp)
-{
-	struct nfsd4_op *op;
-
-	dprintk("--> %s resp->opcnt %d ce_cachethis %u \n", __func__,
-		resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis);
-
-	/* Encode the replayed sequence operation */
-	BUG_ON(resp->opcnt != 1);
-	op = &args->ops[resp->opcnt - 1];
-	nfsd4_encode_operation(resp, op);
-
-	/*return nfserr_retry_uncached_rep in next operation. */
-	if (resp->cstate.slot->sl_cache_entry.ce_cachethis == 0) {
-		op = &args->ops[resp->opcnt++];
-		op->status = nfserr_retry_uncached_rep;
-		nfsd4_encode_operation(resp, op);
-	}
-	return op->status;
-}
-
-/*
  * Enforce NFSv4.1 COMPOUND ordering rules.
  *
  * TODO:
@@ -1083,13 +1092,10 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 			BUG_ON(op->status == nfs_ok);
 
 encode_op:
-		/* Only from SEQUENCE or CREATE_SESSION */
+		/* Only from SEQUENCE */
 		if (resp->cstate.status == nfserr_replay_cache) {
 			dprintk("%s NFS4.1 replay from cache\n", __func__);
-			if (nfsd4_not_cached(resp))
-				status = nfsd4_enc_uncached_replay(args, resp);
-			else
-				status = op->status;
+			status = op->status;
 			goto out;
 		}
 		if (op->status == nfserr_replay_me) {
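For reference, a minimal standalone sketch (not part of the patch) of the attribute-bitmap behaviour the new do_set_nfs4_acl() relies on: if setting the ACL fails after the file has already been created, the server just drops the ACL bit from the reply bitmap rather than failing the whole OPEN/CREATE. The FATTR4_WORD0_ACL value and helper name below are illustrative stand-ins, not nfsd code.

#include <stdio.h>
#include <stdint.h>

#define FATTR4_WORD0_ACL	(1u << 12)	/* assumed: ACL is attribute 12 */

/* Pretend the ACL set failed: clear the ACL bit in the reply bitmap. */
static void drop_acl_on_failure(uint32_t *bmval, int acl_status)
{
	if (acl_status)
		bmval[0] &= ~FATTR4_WORD0_ACL;
}

int main(void)
{
	uint32_t bmval[2] = { FATTR4_WORD0_ACL | 0x1, 0 };

	drop_acl_on_failure(bmval, -1 /* simulated nfsd4_set_nfs4_acl error */);
	printf("word0 after failed ACL set: 0x%x\n", (unsigned)bmval[0]);
	return 0;
}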
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 980a216a48c8..9295c4b56bce 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -414,35 +414,34 @@ gen_sessionid(struct nfsd4_session *ses)
 
 /*
  * Give the client the number of slots it requests bound by
- * NFSD_MAX_SLOTS_PER_SESSION and by sv_drc_max_pages.
+ * NFSD_MAX_SLOTS_PER_SESSION and by nfsd_drc_max_mem.
  *
- * If we run out of pages (sv_drc_pages_used == sv_drc_max_pages) we
- * should (up to a point) re-negotiate active sessions and reduce their
- * slot usage to make rooom for new connections. For now we just fail the
- * create session.
+ * If we run out of reserved DRC memory we should (up to a point) re-negotiate
+ * active sessions and reduce their slot usage to make room for new
+ * connections. For now we just fail the create session.
  */
 static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan)
 {
-	int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT;
+	int mem;
 
 	if (fchan->maxreqs < 1)
 		return nfserr_inval;
 	else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
 		fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
 
-	spin_lock(&nfsd_serv->sv_lock);
-	if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages)
-		np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used;
-	nfsd_serv->sv_drc_pages_used += np;
-	spin_unlock(&nfsd_serv->sv_lock);
+	mem = fchan->maxreqs * NFSD_SLOT_CACHE_SIZE;
 
-	if (np <= 0) {
-		status = nfserr_resource;
-		fchan->maxreqs = 0;
-	} else
-		fchan->maxreqs = np / NFSD_PAGES_PER_SLOT;
+	spin_lock(&nfsd_drc_lock);
+	if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem)
+		mem = ((nfsd_drc_max_mem - nfsd_drc_mem_used) /
+				NFSD_SLOT_CACHE_SIZE) * NFSD_SLOT_CACHE_SIZE;
+	nfsd_drc_mem_used += mem;
+	spin_unlock(&nfsd_drc_lock);
 
-	return status;
+	fchan->maxreqs = mem / NFSD_SLOT_CACHE_SIZE;
+	if (fchan->maxreqs == 0)
+		return nfserr_resource;
+	return 0;
 }
 
 /*
@@ -466,9 +465,7 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
 	fchan->maxresp_sz = maxcount;
 	session_fchan->maxresp_sz = fchan->maxresp_sz;
 
-	/* Set the max response cached size our default which is
-	 * a multiple of PAGE_SIZE and small */
-	session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
+	session_fchan->maxresp_cached = NFSD_SLOT_CACHE_SIZE;
 	fchan->maxresp_cached = session_fchan->maxresp_cached;
 
 	/* Use the client's maxops if possible */
@@ -476,10 +473,6 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
 		fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
 	session_fchan->maxops = fchan->maxops;
 
-	/* try to use the client requested number of slots */
-	if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
-		fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
-
 	/* FIXME: Error means no more DRC pages so the server should
 	 * recover pages from existing sessions. For now fail session
 	 * creation.
@@ -585,6 +578,9 @@ free_session(struct kref *kref)
 		struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
 		nfsd4_release_respages(e->ce_respages, e->ce_resused);
 	}
+	spin_lock(&nfsd_drc_lock);
+	nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE;
+	spin_unlock(&nfsd_drc_lock);
 	kfree(ses);
 }
 
@@ -657,8 +653,6 @@ static inline void
 free_client(struct nfs4_client *clp)
 {
 	shutdown_callback_client(clp);
-	nfsd4_release_respages(clp->cl_slot.sl_cache_entry.ce_respages,
-			clp->cl_slot.sl_cache_entry.ce_resused);
 	if (clp->cl_cred.cr_group_info)
 		put_group_info(clp->cl_cred.cr_group_info);
 	kfree(clp->cl_principal);
@@ -1115,6 +1109,36 @@ nfsd41_copy_replay_data(struct nfsd4_compoundres *resp,
 }
 
 /*
+ * Encode the replay sequence operation from the slot values.
+ * If cachethis is FALSE encode the uncached rep error on the next
+ * operation which sets resp->p and increments resp->opcnt for
+ * nfs4svc_encode_compoundres.
+ *
+ */
+static __be32
+nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
+			  struct nfsd4_compoundres *resp)
+{
+	struct nfsd4_op *op;
+	struct nfsd4_slot *slot = resp->cstate.slot;
+
+	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
+		resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis);
+
+	/* Encode the replayed sequence operation */
+	op = &args->ops[resp->opcnt - 1];
+	nfsd4_encode_operation(resp, op);
+
+	/* Return nfserr_retry_uncached_rep in next operation. */
+	if (args->opcnt > 1 && slot->sl_cache_entry.ce_cachethis == 0) {
+		op = &args->ops[resp->opcnt++];
+		op->status = nfserr_retry_uncached_rep;
+		nfsd4_encode_operation(resp, op);
+	}
+	return op->status;
+}
+
+/*
  * Keep the first page of the replay. Copy the NFSv4.1 data from the first
  * cached page. Replace any futher replay pages from the cache.
  */
@@ -1137,10 +1161,12 @@ nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
 	 * session inactivity timer fires and a solo sequence operation
 	 * is sent (lease renewal).
 	 */
-	if (seq && nfsd4_not_cached(resp)) {
-		seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
-		return nfs_ok;
-	}
+	seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
+
+	/* Either returns 0 or nfserr_retry_uncached */
+	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
+	if (status == nfserr_retry_uncached_rep)
+		return status;
 
 	if (!nfsd41_copy_replay_data(resp, entry)) {
 		/*
@@ -1297,12 +1323,11 @@ out_copy:
 	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
 	exid->clientid.cl_id = new->cl_clientid.cl_id;
 
-	new->cl_slot.sl_seqid = 0;
 	exid->seqid = 1;
 	nfsd4_set_ex_flags(new, exid);
 
 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
-		new->cl_slot.sl_seqid, new->cl_exchange_flags);
+		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
 	status = nfs_ok;
 
 out:
@@ -1313,40 +1338,60 @@ error:
 }
 
 static int
-check_slot_seqid(u32 seqid, struct nfsd4_slot *slot)
+check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
 {
-	dprintk("%s enter. seqid %d slot->sl_seqid %d\n", __func__, seqid,
-		slot->sl_seqid);
+	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
+		slot_seqid);
 
 	/* The slot is in use, and no response has been sent. */
-	if (slot->sl_inuse) {
-		if (seqid == slot->sl_seqid)
+	if (slot_inuse) {
+		if (seqid == slot_seqid)
 			return nfserr_jukebox;
 		else
 			return nfserr_seq_misordered;
 	}
 	/* Normal */
-	if (likely(seqid == slot->sl_seqid + 1))
+	if (likely(seqid == slot_seqid + 1))
 		return nfs_ok;
 	/* Replay */
-	if (seqid == slot->sl_seqid)
+	if (seqid == slot_seqid)
 		return nfserr_replay_cache;
 	/* Wraparound */
-	if (seqid == 1 && (slot->sl_seqid + 1) == 0)
+	if (seqid == 1 && (slot_seqid + 1) == 0)
 		return nfs_ok;
 	/* Misordered replay or misordered new request */
 	return nfserr_seq_misordered;
 }
 
+/*
+ * Cache the create session result into the create session single DRC
+ * slot cache by saving the xdr structure. sl_seqid has been set.
+ * Do this for solo or embedded create session operations.
+ */
+static void
+nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
+			   struct nfsd4_clid_slot *slot, int nfserr)
+{
+	slot->sl_status = nfserr;
+	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
+}
+
+static __be32
+nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
+			    struct nfsd4_clid_slot *slot)
+{
+	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
+	return slot->sl_status;
+}
+
 __be32
 nfsd4_create_session(struct svc_rqst *rqstp,
 		     struct nfsd4_compound_state *cstate,
 		     struct nfsd4_create_session *cr_ses)
 {
 	u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
-	struct nfsd4_compoundres *resp = rqstp->rq_resp;
 	struct nfs4_client *conf, *unconf;
-	struct nfsd4_slot *slot = NULL;
+	struct nfsd4_clid_slot *cs_slot = NULL;
 	int status = 0;
 
 	nfs4_lock_state();
@@ -1354,24 +1399,22 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 	conf = find_confirmed_client(&cr_ses->clientid);
 
 	if (conf) {
-		slot = &conf->cl_slot;
-		status = check_slot_seqid(cr_ses->seqid, slot);
+		cs_slot = &conf->cl_cs_slot;
+		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
 		if (status == nfserr_replay_cache) {
 			dprintk("Got a create_session replay! seqid= %d\n",
-				slot->sl_seqid);
-			cstate->slot = slot;
-			cstate->status = status;
+				cs_slot->sl_seqid);
 			/* Return the cached reply status */
-			status = nfsd4_replay_cache_entry(resp, NULL);
+			status = nfsd4_replay_create_session(cr_ses, cs_slot);
 			goto out;
-		} else if (cr_ses->seqid != conf->cl_slot.sl_seqid + 1) {
+		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
 			status = nfserr_seq_misordered;
 			dprintk("Sequence misordered!\n");
 			dprintk("Expected seqid= %d but got seqid= %d\n",
-				slot->sl_seqid, cr_ses->seqid);
+				cs_slot->sl_seqid, cr_ses->seqid);
 			goto out;
 		}
-		conf->cl_slot.sl_seqid++;
+		cs_slot->sl_seqid++;
 	} else if (unconf) {
 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
 		    (ip_addr != unconf->cl_addr)) {
@@ -1379,15 +1422,15 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 			goto out;
 		}
 
-		slot = &unconf->cl_slot;
-		status = check_slot_seqid(cr_ses->seqid, slot);
+		cs_slot = &unconf->cl_cs_slot;
+		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
 		if (status) {
 			/* an unconfirmed replay returns misordered */
 			status = nfserr_seq_misordered;
-			goto out;
+			goto out_cache;
 		}
 
-		slot->sl_seqid++; /* from 0 to 1 */
+		cs_slot->sl_seqid++; /* from 0 to 1 */
 		move_to_confirmed(unconf);
 
 		/*
@@ -1408,12 +1451,11 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 
 	memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data,
 	       NFS4_MAX_SESSIONID_LEN);
-	cr_ses->seqid = slot->sl_seqid;
+	cr_ses->seqid = cs_slot->sl_seqid;
 
-	slot->sl_inuse = true;
-	cstate->slot = slot;
-	/* Ensure a page is used for the cache */
-	slot->sl_cache_entry.ce_cachethis = 1;
+out_cache:
+	/* cache solo and embedded create sessions under the state lock */
+	nfsd4_cache_create_session(cr_ses, cs_slot, status);
 out:
 	nfs4_unlock_state();
 	dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1481,7 +1523,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 	slot = &session->se_slots[seq->slotid];
 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
 
-	status = check_slot_seqid(seq->seqid, slot);
+	status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
 	if (status == nfserr_replay_cache) {
 		cstate->slot = slot;
 		cstate->session = session;
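For reference, a minimal standalone sketch (not part of the patch) of the slot sequence-id rules that the reworked check_slot_seqid() encodes with its new (seqid, slot_seqid, slot_inuse) signature. The enum values and the test harness below are stand-ins, not the on-the-wire NFS4ERR codes.

#include <stdio.h>
#include <stdint.h>

enum { NFS_OK, NFSERR_JUKEBOX, NFSERR_SEQ_MISORDERED, NFSERR_REPLAY_CACHE };

static int check_slot_seqid(uint32_t seqid, uint32_t slot_seqid, int slot_inuse)
{
	/* Slot busy: same seqid means "try again later", else misordered. */
	if (slot_inuse)
		return seqid == slot_seqid ? NFSERR_JUKEBOX : NFSERR_SEQ_MISORDERED;
	if (seqid == slot_seqid + 1)		/* normal progression */
		return NFS_OK;
	if (seqid == slot_seqid)		/* retransmission -> replay cache */
		return NFSERR_REPLAY_CACHE;
	if (seqid == 1 && slot_seqid + 1 == 0)	/* 32-bit wraparound */
		return NFS_OK;
	return NFSERR_SEQ_MISORDERED;		/* misordered replay or request */
}

int main(void)
{
	printf("normal: %d\n", check_slot_seqid(6, 5, 0));
	printf("replay: %d\n", check_slot_seqid(5, 5, 0));
	printf("in use: %d\n", check_slot_seqid(5, 5, 1));
	printf("wrap:   %d\n", check_slot_seqid(1, UINT32_MAX, 0));
	return 0;
}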
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2dcc7feaa6ff..fdf632bf1cfe 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3313,8 +3313,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
 			dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
 			resp->cstate.slot->sl_inuse = 0;
 		}
-		if (resp->cstate.session)
-			nfsd4_put_session(resp->cstate.session);
+		nfsd4_put_session(resp->cstate.session);
 	}
 	return 1;
 }
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7e906c5b7671..b764d7d898e9 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -776,10 +776,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
 		size -= len;
 		mesg += len;
 	}
-
-	mutex_unlock(&nfsd_mutex);
-	return (mesg-buf);
-
+	rv = mesg - buf;
 out_free:
 	kfree(nthreads);
 	mutex_unlock(&nfsd_mutex);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 492c79b7800b..d68cd056b281 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -66,6 +66,16 @@ struct timeval nfssvc_boot;
 DEFINE_MUTEX(nfsd_mutex);
 struct svc_serv *nfsd_serv;
 
+/*
+ * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
+ * nfsd_drc_max_mem limits the total amount of memory available for
+ * version 4.1 DRC caches.
+ * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
+ */
+spinlock_t nfsd_drc_lock;
+unsigned int nfsd_drc_max_mem;
+unsigned int nfsd_drc_mem_used;
+
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
 static struct svc_stat nfsd_acl_svcstats;
 static struct svc_version * nfsd_acl_version[] = {
@@ -235,13 +245,12 @@ void nfsd_reset_versions(void)
  */
 static void set_max_drc(void)
 {
-	/* The percent of nr_free_buffer_pages used by the V4.1 server DRC */
-	#define NFSD_DRC_SIZE_SHIFT	7
-	nfsd_serv->sv_drc_max_pages = nr_free_buffer_pages()
-					>> NFSD_DRC_SIZE_SHIFT;
-	nfsd_serv->sv_drc_pages_used = 0;
-	dprintk("%s svc_drc_max_pages %u\n", __func__,
-		nfsd_serv->sv_drc_max_pages);
+	#define NFSD_DRC_SIZE_SHIFT	10
+	nfsd_drc_max_mem = (nr_free_buffer_pages()
+				>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
+	nfsd_drc_mem_used = 0;
+	spin_lock_init(&nfsd_drc_lock);
+	dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
 }
 
 int nfsd_create_serv(void)
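For reference, a minimal standalone sketch (not part of the patch) of the buffer-based DRC accounting that set_forechannel_maxreqs() now performs: the remaining reservation is rounded down to a whole number of NFSD_SLOT_CACHE_SIZE slots, and a session that cannot get even one slot is refused. The sizes below are made-up example values, and the nfsd_drc_lock spinlock is elided since this single-threaded sketch does not need it.

#include <stdio.h>

#define NFSD_SLOT_CACHE_SIZE		2048	/* assumed per-slot cache size */
#define NFSD_MAX_SLOTS_PER_SESSION	160	/* assumed per-session cap */

static unsigned int drc_max_mem = 64 * 1024;	/* example total reservation */
static unsigned int drc_mem_used;

/* Returns the number of slots granted; 0 means no DRC memory is left. */
static int grant_slots(int wanted)
{
	int mem;

	if (wanted > NFSD_MAX_SLOTS_PER_SESSION)
		wanted = NFSD_MAX_SLOTS_PER_SESSION;
	mem = wanted * NFSD_SLOT_CACHE_SIZE;

	/* Round what is left down to a whole number of slots. */
	if (mem + drc_mem_used > drc_max_mem)
		mem = ((drc_max_mem - drc_mem_used) /
				NFSD_SLOT_CACHE_SIZE) * NFSD_SLOT_CACHE_SIZE;
	drc_mem_used += mem;

	return mem / NFSD_SLOT_CACHE_SIZE;
}

int main(void)
{
	/* Three sessions asking for 20 slots each against a 32-slot budget:
	 * the first gets 20, the second the remaining 12, the third none.
	 */
	printf("first session:  %d slots\n", grant_slots(20));
	printf("second session: %d slots\n", grant_slots(20));
	printf("third session:  %d slots\n", grant_slots(20));
	return 0;
}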