| author | Nikanth Karthikesan <knikanth@novell.com> | 2009-06-10 15:57:06 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-06-10 17:07:15 -0400 |
| commit | d9c7d394a8ebacb60097b192939ae9f15235225e | |
| tree | 4fbf9c7677acb03c23d05faba1a4b0bcfb6702b4 /block/blk-ioc.c | |
| parent | 1d589bb16b825b3a7b4edd34d997f1f1f953033d | |
block: prevent possible io_context->refcount overflow
Currently io_context has an atomic_t (32-bit) as its refcount. In the case of
cfq, a reference to the io_context is taken for each device against which a
task does I/O. In addition, each of the processes sharing an io_context
(CLONE_IO) holds a reference to that same io_context. Theoretically, the
number of processes sharing the same io_context plus the number of
disks/cfq_data referring to the same io_context can overflow the 32-bit
counter on a very high-end machine. Even though it is an improbable case,
let us make it an atomic_long_t.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
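For scale: an atomic_t holds a 32-bit value, so on the order of 2^32 references (CLONE_IO sharers plus per-device cfq references) are enough to wrap it. Below is a minimal userspace sketch of the wrap, not kernel code; it uses unsigned arithmetic to keep the demonstration well-defined and assumes a 64-bit machine, where atomic_long_t is 64 bits wide:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A 32-bit refcount at its limit wraps to zero on the next get... */
	uint32_t ref32 = UINT32_MAX;
	ref32++;
	printf("32-bit counter after one more get: %u\n", ref32);  /* 0 */

	/* ...while a 64-bit counter (atomic_long_t on a 64-bit kernel)
	 * absorbs the same increment with room to spare. */
	uint64_t ref64 = UINT32_MAX;
	ref64++;
	printf("64-bit counter after one more get: %llu\n",
	       (unsigned long long)ref64);  /* 4294967296 */
	return 0;
}
```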
Diffstat (limited to 'block/blk-ioc.c')

```
-rw-r--r--  block/blk-ioc.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
```
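The diffstat is limited to block/blk-ioc.c, so the declaration side of the conversion is not shown here. In include/linux/iocontext.h, the refcount field of struct io_context changes along these lines (a sketch with the surrounding members abbreviated, not the verbatim hunk):

```c
/* include/linux/iocontext.h -- sketch, other members omitted */
struct io_context {
	atomic_long_t refcount;		/* was: atomic_t refcount; */
	atomic_t nr_tasks;		/* task count stays 32-bit */
	/* ... remaining members unchanged ... */
};
```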
```diff
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 012f065ac8e2..d4ed6000147d 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
 	if (ioc == NULL)
 		return 1;
 
-	BUG_ON(atomic_read(&ioc->refcount) == 0);
+	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 
-	if (atomic_dec_and_test(&ioc->refcount)) {
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
-		atomic_set(&ret->refcount, 1);
+		atomic_long_set(&ret->refcount, 1);
 		atomic_set(&ret->nr_tasks, 1);
 		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 		ret = current_io_context(gfp_flags, node);
 		if (unlikely(!ret))
 			break;
-	} while (!atomic_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ret->refcount));
 
 	return ret;
 }
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 	struct io_context *dst = *pdst;
 
 	if (src) {
-		BUG_ON(atomic_read(&src->refcount) == 0);
-		atomic_inc(&src->refcount);
+		BUG_ON(atomic_long_read(&src->refcount) == 0);
+		atomic_long_inc(&src->refcount);
 		put_io_context(dst);
 		*pdst = src;
 	}
```
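A note on the get_io_context() hunk above: the retry loop pairs with the RCU-protected teardown in put_io_context(). atomic_long_inc_not_zero() declines to take a reference once the count has already reached zero, so a lookup racing with the final put retries via current_io_context() instead of resurrecting an io_context that is about to be freed. The conversion changes only the width of the counter; the lifetime rules stay exactly as they were.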