about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
-rw-r--r-- fs/aio.c | 19 +++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index dcad3a66748c..8f0127526299 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -441,10 +441,9 @@ static const struct address_space_operations aio_ctx_aops = {
441#endif 441#endif
442}; 442};
443 443
444static int aio_setup_ring(struct kioctx *ctx) 444static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
445{ 445{
446 struct aio_ring *ring; 446 struct aio_ring *ring;
447 unsigned nr_events = ctx->max_reqs;
448 struct mm_struct *mm = current->mm; 447 struct mm_struct *mm = current->mm;
449 unsigned long size, unused; 448 unsigned long size, unused;
450 int nr_pages; 449 int nr_pages;
@@ -707,6 +706,12 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
707 int err = -ENOMEM; 706 int err = -ENOMEM;
708 707
709 /* 708 /*
709 * Store the original nr_events -- what userspace passed to io_setup(),
710 * for counting against the global limit -- before it changes.
711 */
712 unsigned int max_reqs = nr_events;
713
714 /*
710 * We keep track of the number of available ringbuffer slots, to prevent 715 * We keep track of the number of available ringbuffer slots, to prevent
711 * overflow (reqs_available), and we also use percpu counters for this. 716 * overflow (reqs_available), and we also use percpu counters for this.
712 * 717 *
@@ -724,14 +729,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
724 return ERR_PTR(-EINVAL); 729 return ERR_PTR(-EINVAL);
725 } 730 }
726 731
727 if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) 732 if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
728 return ERR_PTR(-EAGAIN); 733 return ERR_PTR(-EAGAIN);
729 734
730 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); 735 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
731 if (!ctx) 736 if (!ctx)
732 return ERR_PTR(-ENOMEM); 737 return ERR_PTR(-ENOMEM);
733 738
734 ctx->max_reqs = nr_events; 739 ctx->max_reqs = max_reqs;
735 740
736 spin_lock_init(&ctx->ctx_lock); 741 spin_lock_init(&ctx->ctx_lock);
737 spin_lock_init(&ctx->completion_lock); 742 spin_lock_init(&ctx->completion_lock);
@@ -753,7 +758,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
753 if (!ctx->cpu) 758 if (!ctx->cpu)
754 goto err; 759 goto err;
755 760
756 err = aio_setup_ring(ctx); 761 err = aio_setup_ring(ctx, nr_events);
757 if (err < 0) 762 if (err < 0)
758 goto err; 763 goto err;
759 764
@@ -764,8 +769,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
764 769
765 /* limit the number of system wide aios */ 770 /* limit the number of system wide aios */
766 spin_lock(&aio_nr_lock); 771 spin_lock(&aio_nr_lock);
767 if (aio_nr + nr_events > (aio_max_nr * 2UL) || 772 if (aio_nr + ctx->max_reqs > aio_max_nr ||
768 aio_nr + nr_events < aio_nr) { 773 aio_nr + ctx->max_reqs < aio_nr) {
769 spin_unlock(&aio_nr_lock); 774 spin_unlock(&aio_nr_lock);
770 err = -EAGAIN; 775 err = -EAGAIN;
771 goto err_ctx; 776 goto err_ctx;