| author | Adrian Bunk <bunk@kernel.org> | 2008-04-29 03:58:57 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-29 11:06:00 -0400 |
| commit | d5470b596abdd566339b2417e807b1198be64b97 (patch) | |
| tree | e668e8196ed46efc9ea6036c5889e5f3a54dedcf /fs/aio.c | |
| parent | 07d45da616f8514651360b502314fc9554223a03 (diff) | |
fs/aio.c: make 3 functions static
Make the following needlessly global functions static:
- __put_ioctx()
- lookup_ioctx()
- io_submit_one()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
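For readers skimming the change: making a function `static` in C gives it internal linkage, so it is visible only within its translation unit, stops polluting the kernel's global symbol namespace, and lets the compiler inline or discard it more freely. A minimal sketch of the before/after pattern, using hypothetical helper names rather than the aio functions themselves:

```c
/* sketch.c -- hypothetical example, not kernel code */

/* External linkage: visible to every other translation unit and listed
 * in the linker's symbol table, even if nothing else ever calls it.
 */
int do_work_global(void)
{
	return 42;
}

/* Internal linkage: confined to this file. This one-word change is all
 * the commit applies to __put_ioctx(), lookup_ioctx() and io_submit_one().
 */
static int do_work_local(void)
{
	return 42;
}

int main(void)
{
	return do_work_global() + do_work_local() - 84;
}
```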
Diffstat (limited to 'fs/aio.c')
| -rw-r--r-- | fs/aio.c | 67 |
1 file changed, 39 insertions, 28 deletions
```diff
@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	atomic_inc(&(kioctx)->users);					\
+} while (0)
+#define put_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
+		__put_ioctx(kioctx);					\
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
```
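The `get_ioctx()`/`put_ioctx()` macros moved above implement a classic reference-counting pattern: every user increments the count, and the last decrement, detected atomically, triggers the destructor exactly once. A minimal userspace sketch of the same idea, with a hypothetical `ctx` type and `free_ctx()` standing in for `struct kioctx` and `__put_ioctx()`:

```c
/* refcount.c -- hypothetical userspace sketch, not kernel code */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int users;
};

static void free_ctx(struct ctx *c)
{
	printf("freeing %p\n", (void *)c);
	free(c);
}

static void get_ctx(struct ctx *c)
{
	assert(atomic_load(&c->users) > 0);	/* mirrors the BUG_ON() */
	atomic_fetch_add(&c->users, 1);
}

static void put_ctx(struct ctx *c)
{
	assert(atomic_load(&c->users) > 0);
	/* fetch_sub returns the old value: 1 means we held the last ref */
	if (atomic_fetch_sub(&c->users, 1) == 1)
		free_ctx(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	atomic_init(&c->users, 1);	/* creator holds the first reference */
	get_ctx(c);			/* a second user takes a reference */
	put_ctx(c);			/* ...and drops it: ctx survives */
	put_ctx(c);			/* last reference gone: free_ctx() runs */
	return 0;
}
```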
```diff
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm)
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request.  Increments the users count
  *	of the kioctx so that the kioctx stays around until all requests are
```
```diff
@@ -545,7 +556,7 @@ int aio_put_req(struct kiocb *req)
 /* Lookup an ioctx id.  ioctx_list is lockless for reads.
  * FIXME: this is O(n) and is only suitable for development.
  */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
```
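The FIXME retained above notes that the lookup is an O(n) walk over the process's list of contexts. A minimal sketch of that shape of lookup, with a hypothetical node type and `find_ctx()` name (the real `lookup_ioctx()` walks `mm->ioctx_list` and takes a reference via `get_ioctx()` on a match):

```c
/* list_lookup.c -- hypothetical userspace sketch, not the kernel code */
#include <stddef.h>
#include <stdio.h>

struct ctx_node {
	unsigned long id;
	struct ctx_node *next;
};

/* Linear scan: cost grows with the number of contexts, hence the FIXME. */
static struct ctx_node *find_ctx(struct ctx_node *head, unsigned long id)
{
	struct ctx_node *n;

	for (n = head; n != NULL; n = n->next)
		if (n->id == id)
			return n;
	return NULL;
}

int main(void)
{
	struct ctx_node b = { 2, NULL };
	struct ctx_node a = { 1, &b };

	printf("found id 2: %p\n", (void *)find_ctx(&a, 2));
	return 0;
}
```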
```diff
@@ -1552,7 +1563,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		  struct iocb *iocb)
 {
 	struct kiocb *req;
```