Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	32	+++++++++++---------------------
1 file changed, 11 insertions, 21 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 4f71627264fd..da887604dfc5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@ out_freectx:
 	return ERR_PTR(err);
 }
 
-/* aio_cancel_all
+/* kill_ctx
  *	Cancels all outstanding aio requests on an aio context.  Used
  *	when the processes owning a context have all exited to encourage
  *	the rapid destruction of the kioctx.
  */
-static void aio_cancel_all(struct kioctx *ctx)
+static void kill_ctx(struct kioctx *ctx)
 {
 	int (*cancel)(struct kiocb *, struct io_event *);
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
 	struct io_event res;
+
 	spin_lock_irq(&ctx->ctx_lock);
 	ctx->dead = 1;
 	while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@ static void aio_cancel_all(struct kioctx *ctx)
 			spin_lock_irq(&ctx->ctx_lock);
 		}
 	}
-	spin_unlock_irq(&ctx->ctx_lock);
-}
-
-static void wait_for_all_aios(struct kioctx *ctx)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
 
-	spin_lock_irq(&ctx->ctx_lock);
 	if (!ctx->reqs_active)
 		goto out;
 
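Taken together, the two hunks above rename aio_cancel_all() to kill_ctx() and fold the body of wait_for_all_aios() into its tail, so the cancel pass and the wait for ctx->reqs_active to drain now run in one function under a single acquisition of ctx->ctx_lock. A condensed sketch of the merged result; the wait loop itself is unchanged context this diff does not show, so its body below is an assumption reconstructed from the old wait_for_all_aios():

static void kill_ctx(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct io_event res;

	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	/* ... cancel every iocb on ctx->active_reqs, as in the first hunk;
	 * each cancel(iocb, &res) drops and retakes ctx_lock ... */

	if (!ctx->reqs_active)
		goto out;

	/* Former wait_for_all_aios() body (assumed carried over unchanged):
	 * sleep until the last in-flight request completes. */
	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);
out:
	spin_unlock_irq(&ctx->ctx_lock);
}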
@@ -387,9 +382,7 @@ void exit_aio(struct mm_struct *mm)
 		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
 		hlist_del_rcu(&ctx->list);
 
-		aio_cancel_all(ctx);
-
-		wait_for_all_aios(ctx);
+		kill_ctx(ctx);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@ static void io_destroy(struct kioctx *ioctx)
 	if (likely(!was_dead))
 		put_ioctx(ioctx);	/* twice for the list */
 
-	aio_cancel_all(ioctx);
-	wait_for_all_aios(ioctx);
+	kill_ctx(ioctx);
 
 	/*
 	 * Wake up any waiters. The setting of ctx->dead must be seen
@@ -1278,7 +1270,6 @@ static void io_destroy(struct kioctx *ioctx)
 	 * locking done by the above calls to ensure this consistency.
 	 */
 	wake_up_all(&ioctx->wait);
-	put_ioctx(ioctx);	/* once for the lookup */
 }
 
 /* sys_io_setup:
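Note the contract change in the hunk above: io_destroy() used to consume the reference its caller obtained ("once for the lookup"); after this patch it leaves refcounting entirely to the caller. The two hunks below adjust both call sites to match. The resulting caller pattern, sketched (comments are mine, not in the source):

	struct kioctx *ioctx = lookup_ioctx(ctx);	/* takes a reference */
	if (ioctx) {
		io_destroy(ioctx);	/* marks dead, cancels, waits; no put */
		put_ioctx(ioctx);	/* caller drops its own reference */
	}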
@@ -1315,11 +1306,9 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	ret = PTR_ERR(ioctx);
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
-		if (!ret) {
-			put_ioctx(ioctx);
-			return 0;
-		}
-		io_destroy(ioctx);
+		if (ret)
+			io_destroy(ioctx);
+		put_ioctx(ioctx);
 	}
 
 out:
@@ -1337,6 +1326,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
 		io_destroy(ioctx);
+		put_ioctx(ioctx);
 		return 0;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
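
For context, the setup/teardown paths this patch touches can be exercised from userspace without libaio, via raw syscalls. A minimal sketch (the event count 128 is an arbitrary choice):

#include <linux/aio_abi.h>	/* aio_context_t */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;	/* io_setup requires *ctxp == 0 on entry */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {	/* room for 128 events */
		perror("io_setup");
		return 1;
	}
	/* ... io_submit()/io_getevents() would go here ... */
	if (syscall(SYS_io_destroy, ctx) < 0) {
		perror("io_destroy");
		return 1;
	}
	return 0;
}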