 fs/aio.c | 58 +++++++++++++++++++++++-----------------------------------
 1 file changed, 23 insertions(+), 35 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index efa708b29054..062a5f6a1448 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -362,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 	struct aio_ring *ring;
 	unsigned nr_events = ctx->max_reqs;
 	struct mm_struct *mm = current->mm;
-	unsigned long size, populate;
+	unsigned long size, unused;
 	int nr_pages;
 	int i;
 	struct file *file;
@@ -383,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
 		return -EAGAIN;
 	}
 
+	ctx->aio_ring_file = file;
+	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+			/ sizeof(struct io_event);
+
+	ctx->ring_pages = ctx->internal_pages;
+	if (nr_pages > AIO_RING_PAGES) {
+		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+					  GFP_KERNEL);
+		if (!ctx->ring_pages) {
+			put_aio_ring_file(ctx);
+			return -ENOMEM;
+		}
+	}
+
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(file->f_inode->i_mapping,
@@ -394,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
 		SetPageUptodate(page);
 		SetPageDirty(page);
 		unlock_page(page);
+
+		ctx->ring_pages[i] = page;
 	}
-	ctx->aio_ring_file = file;
-	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
-			/ sizeof(struct io_event);
+	ctx->nr_pages = i;
 
-	ctx->ring_pages = ctx->internal_pages;
-	if (nr_pages > AIO_RING_PAGES) {
-		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!ctx->ring_pages) {
-			put_aio_ring_file(ctx);
-			return -ENOMEM;
-		}
+	if (unlikely(i != nr_pages)) {
+		aio_free_ring(ctx);
+		return -EAGAIN;
 	}
 
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -415,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
 	down_write(&mm->mmap_sem);
 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
 				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED | MAP_POPULATE, 0, &populate);
+				       MAP_SHARED, 0, &unused);
+	up_write(&mm->mmap_sem);
 	if (IS_ERR((void *)ctx->mmap_base)) {
-		up_write(&mm->mmap_sem);
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EAGAIN;
@@ -425,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 
-	/* We must do this while still holding mmap_sem for write, as we
-	 * need to be protected against userspace attempting to mremap()
-	 * or munmap() the ring buffer.
-	 */
-	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
-				       1, 0, ctx->ring_pages, NULL);
-
-	/* Dropping the reference here is safe as the page cache will hold
-	 * onto the pages for us. It is also required so that page migration
-	 * can unmap the pages and get the right reference count.
-	 */
-	for (i = 0; i < ctx->nr_pages; i++)
-		put_page(ctx->ring_pages[i]);
-
-	up_write(&mm->mmap_sem);
-
-	if (unlikely(ctx->nr_pages != nr_pages)) {
-		aio_free_ring(ctx);
-		return -EAGAIN;
-	}
-
 	ctx->user_id = ctx->mmap_base;
 	ctx->nr_events = nr_events;	/* trusted copy */
 
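For orientation, here is a sketch of how the page-setup path of aio_setup_ring() reads once the hunks above are applied. It is stitched together from this diff's context and added lines only; anything outside the hunks is elided and marked with comments, so treat it as an illustration rather than the verbatim post-patch source.

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* ... size/nr_pages computation and ring-file creation elided ... */

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   /* index/gfp args outside the hunk */);
		/* ... failure check elided ... */
		SetPageUptodate(page);
		SetPageDirty(page);
		unlock_page(page);

		/* Record the page-cache page directly; no later
		 * get_user_pages() walk over the mapping is needed. */
		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	/* ... */
	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused);
	/* mmap_sem can be dropped immediately: ctx->ring_pages was filled
	 * from the page cache above, so nothing below depends on the user
	 * mapping while the lock is held. */
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events;	/* trusted copy */
	/* ... remainder of the function elided ... */
}

The shape of the change: ring pages are captured straight out of the ring file's page cache as find_or_create_page() creates them, which makes both MAP_POPULATE and the subsequent get_user_pages()/put_page() pass redundant, and lets mmap_sem be released as soon as do_mmap_pgoff() returns instead of being held across the page lookup.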