Diffstat (limited to 'fs/aio.c'):

 fs/aio.c | 52 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 15 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 6b868f0e0c4c..067e3d340c35 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,10 +167,25 @@ static int __init aio_setup(void)
 }
 __initcall(aio_setup);
 
+static void put_aio_ring_file(struct kioctx *ctx)
+{
+        struct file *aio_ring_file = ctx->aio_ring_file;
+        if (aio_ring_file) {
+                truncate_setsize(aio_ring_file->f_inode, 0);
+
+                /* Prevent further access to the kioctx from migratepages */
+                spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
+                aio_ring_file->f_inode->i_mapping->private_data = NULL;
+                ctx->aio_ring_file = NULL;
+                spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
+
+                fput(aio_ring_file);
+        }
+}
+
 static void aio_free_ring(struct kioctx *ctx)
 {
         int i;
-        struct file *aio_ring_file = ctx->aio_ring_file;
 
         for (i = 0; i < ctx->nr_pages; i++) {
                 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
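
The new put_aio_ring_file() above is the teardown half of a publish/unpublish handshake on mapping->private_data: the back-pointer that page migration follows is cleared under mapping->private_lock before the final fput(), so once the lock is dropped no new reader can reach the kioctx. A minimal userspace sketch of that ordering follows; struct mapping, struct ctx and ctx_teardown() are hypothetical stand-ins, not kernel code.

#include <pthread.h>
#include <stdlib.h>

struct mapping {                        /* stands in for struct address_space */
        pthread_mutex_t private_lock;   /* models mapping->private_lock */
        void *private_data;             /* back-pointer readers follow */
};

struct ctx {                            /* stands in for struct kioctx */
        struct mapping *map;
};

static void ctx_teardown(struct ctx *c)
{
        /* Unpublish first: after the unlock, no reader can obtain c
         * through map->private_data anymore. */
        pthread_mutex_lock(&c->map->private_lock);
        c->map->private_data = NULL;
        pthread_mutex_unlock(&c->map->private_lock);

        /* Only now is it safe to drop the last reference (the kernel
         * analogue is the final fput()). */
        free(c);
}
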
@@ -178,14 +193,10 @@ static void aio_free_ring(struct kioctx *ctx)
                 put_page(ctx->ring_pages[i]);
         }
 
+        put_aio_ring_file(ctx);
+
         if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
                 kfree(ctx->ring_pages);
-
-        if (aio_ring_file) {
-                truncate_setsize(aio_ring_file->f_inode, 0);
-                fput(aio_ring_file);
-                ctx->aio_ring_file = NULL;
-        }
 }
 
 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
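
Note the ordering inside aio_free_ring(): put_aio_ring_file() is called before kfree(ctx->ring_pages). Once the helper has cleared mapping->private_data under private_lock, a concurrent aio_migratepage() can no longer obtain ctx, so nothing can still be touching ring_pages when it is freed.
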
@@ -207,9 +218,8 @@ static int aio_set_page_dirty(struct page *page)
 static int aio_migratepage(struct address_space *mapping, struct page *new,
                         struct page *old, enum migrate_mode mode)
 {
-        struct kioctx *ctx = mapping->private_data;
+        struct kioctx *ctx;
         unsigned long flags;
-        unsigned idx = old->index;
         int rc;
 
         /* Writeback must be complete */
@@ -224,10 +234,23 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 
         get_page(new);
 
-        spin_lock_irqsave(&ctx->completion_lock, flags);
-        migrate_page_copy(new, old);
-        ctx->ring_pages[idx] = new;
-        spin_unlock_irqrestore(&ctx->completion_lock, flags);
+        /* We can potentially race against kioctx teardown here.  Use the
+         * address_space's private data lock to protect the mapping's
+         * private_data.
+         */
+        spin_lock(&mapping->private_lock);
+        ctx = mapping->private_data;
+        if (ctx) {
+                pgoff_t idx;
+                spin_lock_irqsave(&ctx->completion_lock, flags);
+                migrate_page_copy(new, old);
+                idx = old->index;
+                if (idx < (pgoff_t)ctx->nr_pages)
+                        ctx->ring_pages[idx] = new;
+                spin_unlock_irqrestore(&ctx->completion_lock, flags);
+        } else
+                rc = -EBUSY;
+        spin_unlock(&mapping->private_lock);
 
         return rc;
 }
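
This hunk is the reader half of the same handshake: the kioctx is looked up again under mapping->private_lock, and if teardown has already cleared private_data the migration backs off with -EBUSY rather than dereferencing a freed kioctx. The added idx < nr_pages check also keeps a stale page index from writing past the end of ring_pages. Continuing the userspace sketch above (same hypothetical types), the reader side might look like:

#include <errno.h>

/* Reader half of the sketch: look the context up under the same lock
 * that teardown uses to unpublish it, and bail out if it is gone. */
static int migrate_one(struct mapping *m)
{
        struct ctx *c;
        int rc = 0;

        pthread_mutex_lock(&m->private_lock);
        c = m->private_data;
        if (c) {
                /* c cannot be freed while private_lock is held; in the
                 * patch, this is where ring_pages[] is updated under
                 * completion_lock. */
        } else {
                rc = -EBUSY;    /* teardown won the race */
        }
        pthread_mutex_unlock(&m->private_lock);

        return rc;
}
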
@@ -617,8 +640,7 @@ out_freepcpu:
 out_freeref:
         free_percpu(ctx->users.pcpu_count);
 out_freectx:
-        if (ctx->aio_ring_file)
-                fput(ctx->aio_ring_file);
+        put_aio_ring_file(ctx);
         kmem_cache_free(kioctx_cachep, ctx);
         pr_debug("error allocating ioctx %d\n", err);
         return ERR_PTR(err);
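
Because put_aio_ring_file() itself checks ctx->aio_ring_file for NULL, the error path can call it unconditionally here, replacing the open-coded NULL check and fput() with the same teardown sequence used by aio_free_ring().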