| author | NeilBrown <neilb@suse.de> | 2010-08-12 03:04:06 -0400 |
| --- | --- | --- |
| committer | J. Bruce Fields <bfields@redhat.com> | 2010-09-21 17:08:31 -0400 |
| commit | 839049a8732d689d02051e0198fb60a22f7ccb4b | |
| tree | 7ae2ec763232f79f98a0d8122d710ac9887fb8b9 /fs/nfsd | |
| parent | 8ff30fa4eff2ff9e207961c654caa093f0c84873 | |
nfsd/idmap: drop special request deferral in favour of improved default.
The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request back on a queue to be
retried from the start.
Now that the common deferral code does the same, there is no need for
the special-case code in idmap.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
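
For readers who do not want to reassemble the hunk mentally, the lookup path after this change reduces to the function below, pieced together from the added and context lines of the diff that follows. It is an excerpt, not a standalone build unit: struct ent, cache_check() and cache_put() come from nfsd and the sunrpc cache layer, and the comments are explanatory additions, not part of the patch.

static int
idmap_lookup(struct svc_rqst *rqstp,
		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
		struct cache_detail *detail, struct ent **item)
{
	int ret;

	*item = lookup_fn(key);
	if (!*item)
		return -ENOMEM;
 retry:
	/* Let the generic cache code handle the upcall and any deferral. */
	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);

	if (ret == -ETIMEDOUT) {
		/*
		 * The wait for userspace timed out.  If the entry was
		 * replaced in the meantime, check the new one; otherwise
		 * drop the reference taken by the fresh lookup and give up.
		 */
		struct ent *prev_item = *item;
		*item = lookup_fn(key);
		if (*item != prev_item)
			goto retry;
		cache_put(&(*item)->h, detail);
	}
	return ret;
}

The per-request idmap_defer_req bookkeeping, private waitqueue and the do_idmap_lookup()/do_idmap_lookup_nowait() pair are all gone; cache_check() with the request's rq_chandle now covers those cases.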
Diffstat (limited to 'fs/nfsd')
-rw-r--r-- | fs/nfsd/nfs4idmap.c | 105 |
1 file changed, 11 insertions(+), 94 deletions(-)
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 808b33a4a090..f0695e815f0e 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
 	cache_unregister(&nametoid_cache);
 }
 
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
-	struct cache_req		req;
-	struct cache_deferred_req	deferred_req;
-	wait_queue_head_t		waitq;
-	atomic_t			count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
-	if (atomic_dec_and_test(&mdr->count))
-		kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
-	atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
-	struct idmap_defer_req *mdr =
-		container_of(dreq, struct idmap_defer_req, deferred_req);
-
-	wake_up(&mdr->waitq);
-	put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
-	struct idmap_defer_req *mdr =
-		container_of(req, struct idmap_defer_req, req);
-
-	mdr->deferred_req.revisit = idmap_revisit;
-	get_mdr(mdr);
-	return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item,
-		struct idmap_defer_req *mdr)
-{
-	*item = lookup_fn(key);
-	if (!*item)
-		return -ENOMEM;
-	return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
-			struct ent *key, struct cache_detail *detail,
-			struct ent **item)
-{
-	int ret = -ENOMEM;
-
-	*item = lookup_fn(key);
-	if (!*item)
-		goto out_err;
-	ret = -ETIMEDOUT;
-	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
-			|| (*item)->h.expiry_time < seconds_since_boot()
-			|| detail->flush_time > (*item)->h.last_refresh)
-		goto out_put;
-	ret = -ENOENT;
-	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
-		goto out_put;
-	return 0;
-out_put:
-	cache_put(&(*item)->h, detail);
-out_err:
-	*item = NULL;
-	return ret;
-}
-
 static int
 idmap_lookup(struct svc_rqst *rqstp,
 		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
 		struct cache_detail *detail, struct ent **item)
 {
-	struct idmap_defer_req *mdr;
 	int ret;
 
-	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
-	if (!mdr)
+	*item = lookup_fn(key);
+	if (!*item)
 		return -ENOMEM;
-	atomic_set(&mdr->count, 1);
-	init_waitqueue_head(&mdr->waitq);
-	mdr->req.defer = idmap_defer;
-	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
-	if (ret == -EAGAIN) {
-		wait_event_interruptible_timeout(mdr->waitq,
-			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
-		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+	if (ret == -ETIMEDOUT) {
+		struct ent *prev_item = *item;
+		*item = lookup_fn(key);
+		if (*item != prev_item)
+			goto retry;
+		cache_put(&(*item)->h, detail);
 	}
-	put_mdr(mdr);
 	return ret;
 }
 