author    Kent Overstreet <koverstreet@google.com>    2013-05-07 19:18:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-05-07 21:38:28 -0400
commit    caf4167aa73bef20f5ce994b776ecb2e44226e5e (patch)
tree      da4a35b0aa071b6d552948a59488838288f99bf7 /fs/aio.c
parent    4e179bca6718693148c7445c236bc3e0e0013ffd (diff)
aio: dprintk() -> pr_debug()
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--  fs/aio.c  57
1 file changed, 24 insertions(+), 33 deletions(-)
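The patch removes fs/aio.c's private DEBUG/dprintk() machinery and switches every message to the kernel's standard pr_debug(), defining pr_fmt() so each message is automatically prefixed with the calling function's name; that is why hand-written prefixes such as "aio_setup:" and "aio:" disappear from the format strings. A minimal sketch of the mechanism, illustrative only and not taken from fs/aio.c:

/*
 * Hypothetical example: pr_fmt() must be defined before the printk
 * headers are pulled in, and pr_debug() then prepends the prefix for
 * every call in this translation unit.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/printk.h>

static void example(void)
{
	/*
	 * Emits "example: something happened, err=-22" when debug output
	 * for this file/callsite is enabled; otherwise the arguments are
	 * only type-checked and the call generates no output.
	 */
	pr_debug("something happened, err=%d\n", -22);
}

Unlike the old compile-time "#if DEBUG > 1" switch, pr_debug() output can be enabled per file or per callsite at runtime when CONFIG_DYNAMIC_DEBUG is set, or unconditionally by defining DEBUG for the file.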
diff --git a/fs/aio.c b/fs/aio.c
index 670cb8b84345..1574cb2a9eac 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -8,6 +8,8 @@
  *
  * See ../COPYING for licensing terms.
  */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -18,8 +20,6 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 
-#define DEBUG 0
-
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -39,12 +39,6 @@
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
 
-#if DEBUG > 1
-#define dprintk		printk
-#else
-#define dprintk(x...)	do { ; } while (0)
-#endif
-
 #define AIO_RING_MAGIC			0xa10a10a1
 #define AIO_RING_COMPAT_FEATURES	1
 #define AIO_RING_INCOMPAT_FEATURES	0
@@ -124,7 +118,7 @@ static int __init aio_setup(void)
 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
-	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
+	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
 
 	return 0;
 }
@@ -178,7 +172,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 	}
 
 	info->mmap_size = nr_pages * PAGE_SIZE;
-	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
+	pr_debug("attempting mmap of %lu bytes\n", info->mmap_size);
 	down_write(&mm->mmap_sem);
 	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
 					PROT_READ|PROT_WRITE,
@@ -191,7 +185,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 		return -EAGAIN;
 	}
 
-	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
+	pr_debug("mmap address: 0x%08lx\n", info->mmap_base);
 	info->nr_pages = get_user_pages(current, mm, info->mmap_base, nr_pages,
 					1, 0, info->ring_pages, NULL);
 	up_write(&mm->mmap_sem);
@@ -265,7 +259,7 @@ static void __put_ioctx(struct kioctx *ctx)
 		aio_nr -= nr_events;
 		spin_unlock(&aio_nr_lock);
 	}
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	pr_debug("freeing %p\n", ctx);
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
@@ -354,7 +348,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
 	spin_unlock(&mm->ioctx_lock);
 
-	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 		ctx, ctx->user_id, mm, ctx->ring_info.nr);
 	return ctx;
 
@@ -363,7 +357,7 @@ out_cleanup:
 	aio_free_ring(ctx);
 out_freectx:
 	kmem_cache_free(kioctx_cachep, ctx);
-	dprintk("aio: error allocating ioctx %d\n", err);
+	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
 }
 
@@ -611,8 +605,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
  */
static void __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
-	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
-		req, atomic_long_read(&req->ki_filp->f_count));
+	pr_debug("(%p): f_count=%ld\n",
+		 req, atomic_long_read(&req->ki_filp->f_count));
 
 	assert_spin_locked(&ctx->ctx_lock);
 
@@ -722,9 +716,9 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	event->res = res;
 	event->res2 = res2;
 
-	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
-		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
-		res, res2);
+	pr_debug("%p[%lu]: %p: %p %Lx %lx %lx\n",
+		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+		 res, res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -780,9 +774,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 	int ret = 0;
 
 	ring = kmap_atomic(info->ring_pages[0]);
-	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
-		 (unsigned long)ring->head, (unsigned long)ring->tail,
-		 (unsigned long)ring->nr);
+	pr_debug("h%u t%u m%u\n", ring->head, ring->tail, ring->nr);
 
 	if (ring->head == ring->tail)
 		goto out;
@@ -802,9 +794,8 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 	spin_unlock(&info->ring_lock);
 
 out:
-	dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
-		 (unsigned long)ring->head, (unsigned long)ring->tail);
 	kunmap_atomic(ring);
+	pr_debug("%d h%u t%u\n", ret, ring->head, ring->tail);
 	return ret;
 }
 
@@ -867,13 +858,13 @@ static int read_events(struct kioctx *ctx,
 		if (unlikely(ret <= 0))
 			break;
 
-		dprintk("read event: %Lx %Lx %Lx %Lx\n",
-			ent.data, ent.obj, ent.res, ent.res2);
+		pr_debug("%Lx %Lx %Lx %Lx\n",
+			 ent.data, ent.obj, ent.res, ent.res2);
 
 		/* Could we split the check in two? */
 		ret = -EFAULT;
 		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
-			dprintk("aio: lost an event due to EFAULT.\n");
+			pr_debug("lost an event due to EFAULT.\n");
 			break;
 		}
 		ret = 0;
@@ -936,7 +927,7 @@ static int read_events(struct kioctx *ctx,
 
 		ret = -EFAULT;
 		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
-			dprintk("aio: lost an event due to EFAULT.\n");
+			pr_debug("lost an event due to EFAULT.\n");
 			break;
 		}
 
@@ -967,7 +958,7 @@ static void io_destroy(struct kioctx *ioctx)
 	hlist_del_rcu(&ioctx->list);
 	spin_unlock(&mm->ioctx_lock);
 
-	dprintk("aio_release(%p)\n", ioctx);
+	pr_debug("(%p)\n", ioctx);
 	if (likely(!was_dead))
 		put_ioctx(ioctx);	/* twice for the list */
 
@@ -1264,7 +1255,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 		kiocb->ki_retry = aio_fsync;
 		break;
 	default:
-		dprintk("EINVAL: io_submit: no operation provided\n");
+		pr_debug("EINVAL: no operation provided\n");
 		ret = -EINVAL;
 	}
 
@@ -1284,7 +1275,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
-		pr_debug("EINVAL: io_submit: reserve field set\n");
+		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
@@ -1325,7 +1316,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 
 	ret = put_user(req->ki_key, &user_iocb->aio_key);
 	if (unlikely(ret)) {
-		dprintk("EFAULT: aio_key\n");
+		pr_debug("EFAULT: aio_key\n");
 		goto out_put_req;
 	}
 
@@ -1407,7 +1398,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 
 	ctx = lookup_ioctx(ctx_id);
 	if (unlikely(!ctx)) {
-		pr_debug("EINVAL: io_submit: invalid context id\n");
+		pr_debug("EINVAL: invalid context id\n");
 		return -EINVAL;
 	}
 