Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	79
1 file changed, 43 insertions, 36 deletions
diff --git a/fs/aio.c b/fs/aio.c
index ae94e1dea266..b5253e77eb2f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	atomic_inc(&(kioctx)->users);					\
+} while (0)
+#define put_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
+		__put_ioctx(kioctx);					\
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
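For orientation, get_ioctx()/put_ioctx() above implement the standard atomic reference-count idiom: the count starts at 1 for the creator, get takes an extra reference, and the put that drops the count to zero frees the object. A minimal userspace sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_t and all names hypothetical:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int users;		/* like kioctx->users */
};

static void obj_free(struct obj *o)	/* plays the role of __put_ioctx() */
{
	printf("freeing %p\n", (void *)o);
	free(o);
}

static void obj_get(struct obj *o)	/* cf. get_ioctx() */
{
	assert(atomic_load(&o->users) > 0);	/* mirrors the BUG_ON() */
	atomic_fetch_add(&o->users, 1);
}

static void obj_put(struct obj *o)	/* cf. put_ioctx() */
{
	assert(atomic_load(&o->users) > 0);
	if (atomic_fetch_sub(&o->users, 1) == 1)	/* last reference gone */
		obj_free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->users, 1);	/* creator holds the first reference */
	obj_get(o);			/* count: 2 */
	obj_put(o);			/* count: 1, object survives */
	obj_put(o);			/* count: 0, obj_free() runs */
	return 0;
}

atomic_fetch_sub() returns the value before the decrement, so a return of 1 identifies the final put, just as atomic_dec_and_test() does in the macro above.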
@@ -240,7 +277,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
-	/* now link into global list.  kludge.  FIXME */
+	/* now link into global list. */
 	write_lock(&mm->ioctx_list_lock);
 	ctx->next = mm->ioctx_list;
 	mm->ioctx_list = ctx;
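The linking the comment refers to is a plain head insertion into a singly linked list, serialized by the reader/writer lock mm->ioctx_list_lock: writers like this one take the write side, lookups take the read side. A sketch of the same insertion outside the kernel, using a pthreads rwlock and hypothetical names:

#include <pthread.h>

struct kctx {
	struct kctx *next;
};

static struct kctx *ctx_list;		/* head pointer, like mm->ioctx_list */
static pthread_rwlock_t ctx_list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* insert at the head, as ioctx_alloc() does under write_lock() */
static void ctx_link(struct kctx *ctx)
{
	pthread_rwlock_wrlock(&ctx_list_lock);
	ctx->next = ctx_list;
	ctx_list = ctx;
	pthread_rwlock_unlock(&ctx_list_lock);
}

/* readers walk the list under the read side of the same lock */
static struct kctx *ctx_first(void)
{
	struct kctx *head;

	pthread_rwlock_rdlock(&ctx_list_lock);
	head = ctx_list;
	pthread_rwlock_unlock(&ctx_list_lock);
	return head;
}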
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm)
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request.  Increments the users count
  *	of the kioctx so that the kioctx stays around until all requests are
@@ -542,10 +553,7 @@ int aio_put_req(struct kiocb *req)
 	return ret;
 }
 
-/* Lookup an ioctx id.  ioctx_list is lockless for reads.
- *	FIXME: this is O(n) and is only suitable for development.
- */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
@@ -1070,9 +1078,7 @@ static void timeout_func(unsigned long data)
 
 static inline void init_timeout(struct aio_timeout *to)
 {
-	init_timer(&to->timer);
-	to->timer.data = (unsigned long)to;
-	to->timer.function = timeout_func;
+	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
 	to->timed_out = 0;
 	to->p = current;
 }
@@ -1205,6 +1211,7 @@ retry:
 	if (timeout)
 		clear_timeout(&to);
 out:
+	destroy_timer_on_stack(&to.timer);
 	return i ? i : ret;
 }
 
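The two timer changes belong together: setup_timer_on_stack() replaces the open-coded init_timer() plus the function/data assignments and, when timer debugobjects are enabled, marks the timer as legitimately living on the stack; destroy_timer_on_stack() at the out: label then tells that infrastructure the object is going away before the stack frame does. A sketch of the paired usage under those assumptions (the demo_* names are hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_timeout(unsigned long data)
{
	/* runs in timer (softirq) context; 'data' is the setup cookie */
}

static void demo_wait(void)
{
	struct timer_list t;

	/* one call instead of init_timer() + .function/.data assignments */
	setup_timer_on_stack(&t, demo_timeout, 0);
	mod_timer(&t, jiffies + HZ);	/* fire in about one second */

	/* ... wait for the event or the timeout ... */

	del_timer_sync(&t);		/* make sure the handler has finished */
	destroy_timer_on_stack(&t);	/* unregister from debugobjects */
}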
@@ -1552,7 +1559,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		  struct iocb *iocb)
 {
 	struct kiocb *req;
@@ -1593,7 +1600,7 @@ int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		 * event using the eventfd_signal() function.
 		 */
 		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
-		if (unlikely(IS_ERR(req->ki_eventfd))) {
+		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
 			goto out_put_req;
 		}
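Dropping the unlikely() is not a behavioral change: IS_ERR() already wraps its test in unlikely() internally, so the outer annotation was redundant. The underlying idiom encodes a small negative errno in the last page's worth of pointer values; a self-contained userspace sketch of the helpers, simplified from include/linux/err.h and without the kernel's branch hints:

#include <stdio.h>

#define MAX_ERRNO	4095	/* errnos occupy the last page of the address space */

static inline void *ERR_PTR(long error)		/* encode an errno as a pointer */
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)	/* decode it again */
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)	/* the kernel version adds unlikely() */
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *f = ERR_PTR(-9);	/* e.g. -EBADF from a failed fget() */

	if (IS_ERR(f))
		printf("error %ld\n", PTR_ERR(f));	/* prints "error -9" */
	return 0;
}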