author	Oleg Nesterov <oleg@redhat.com>	2014-04-30 10:16:36 -0400
committer	Benjamin LaHaise <bcrl@kvack.org>	2014-06-24 18:10:25 -0400
commit	855ef0dec7271ff7be7381feaaf3f4aed80bd503 (patch)
tree	1291243a3364240702b03ef74bac539c674583ab /fs
parent	4b70ac5fd9b58bfaa5f25b4ea48f528aefbf3308 (diff)
aio: kill the misleading rcu read locks in ioctx_add_table() and kill_ioctx()
ioctx_add_table() is the writer, so it does not need rcu_read_lock() to protect ->ioctx_table. It relies on mm->ioctx_lock, and the rcu locks just add confusion.

For the same reason it does not need rcu_dereference(): it must see any updates previously done under the same ->ioctx_lock. We could use rcu_dereference_protected(), but the patch uses rcu_dereference_raw(); the function is simple enough.

The same holds for kill_ioctx(), although it does not update the pointer.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
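To illustrate the point the message makes, here is a minimal writer-side sketch with invented names (my_lock, my_table_ptr, writer_replace); it is not code from fs/aio.c. When the update side already holds a spinlock, rcu_dereference_protected() documents (and, with lockdep, verifies) why the plain dereference is safe, while rcu_dereference_raw() simply skips the RCU checking:

/* Illustrative writer-side sketch only; all names are invented. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_table {
	int nr;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_table __rcu *my_table_ptr;

static void writer_replace(struct my_table *new_table)
{
	struct my_table *old;

	spin_lock(&my_lock);
	/*
	 * The writer is serialized by my_lock, so rcu_read_lock() is not
	 * needed here.  rcu_dereference_protected() records that the lock
	 * provides the protection; rcu_dereference_raw() would skip the
	 * check entirely.
	 */
	old = rcu_dereference_protected(my_table_ptr, lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_table_ptr, new_table);
	spin_unlock(&my_lock);

	if (old)
		kfree_rcu(old, rcu);	/* free only after a grace period */
}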
Diffstat (limited to 'fs')
-rw-r--r--	fs/aio.c	14
1 file changed, 3 insertions, 11 deletions
diff --git a/fs/aio.c b/fs/aio.c
index b6696462e345..c1d8c480c138 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -554,8 +554,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 	struct aio_ring *ring;
 
 	spin_lock(&mm->ioctx_lock);
-	rcu_read_lock();
-	table = rcu_dereference(mm->ioctx_table);
+	table = rcu_dereference_raw(mm->ioctx_table);
 
 	while (1) {
 		if (table)
@@ -563,7 +562,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 				if (!table->table[i]) {
 					ctx->id = i;
 					table->table[i] = ctx;
-					rcu_read_unlock();
 					spin_unlock(&mm->ioctx_lock);
 
 					/* While kioctx setup is in progress,
@@ -577,8 +575,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 		}
 
 		new_nr = (table ? table->nr : 1) * 4;
-
-		rcu_read_unlock();
 		spin_unlock(&mm->ioctx_lock);
 
 		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
@@ -589,8 +585,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 		table->nr = new_nr;
 
 		spin_lock(&mm->ioctx_lock);
-		rcu_read_lock();
-		old = rcu_dereference(mm->ioctx_table);
+		old = rcu_dereference_raw(mm->ioctx_table);
 
 		if (!old) {
 			rcu_assign_pointer(mm->ioctx_table, table);
@@ -737,12 +732,9 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 
 
 	spin_lock(&mm->ioctx_lock);
-	rcu_read_lock();
-	table = rcu_dereference(mm->ioctx_table);
-
+	table = rcu_dereference_raw(mm->ioctx_table);
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
-	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
 
 	/* percpu_ref_kill() will do the necessary call_rcu() */
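
For contrast with the writer paths patched above, a reader-side lookup still needs rcu_read_lock() plus a checked rcu_dereference(). A minimal hedged sketch, reusing the invented names from the sketch earlier and not taken from fs/aio.c:

/* Illustrative reader-side sketch only; names are invented. */
static int reader_read_nr(void)
{
	struct my_table *table;
	int nr = 0;

	rcu_read_lock();
	/*
	 * Readers hold no lock, so they rely on rcu_read_lock() and
	 * rcu_dereference(), and must tolerate a concurrent replacement
	 * of the table by the writer.
	 */
	table = rcu_dereference(my_table_ptr);
	if (table)
		nr = table->nr;
	rcu_read_unlock();

	return nr;
}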