-rw-r--r--  block/blk-ioc.c            12
-rw-r--r--  block/cfq-iosched.c         2
-rw-r--r--  include/linux/iocontext.h   6
3 files changed, 10 insertions(+), 10 deletions(-)
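The patch widens the io_context reference count from atomic_t to atomic_long_t, presumably so that on 64-bit kernels the counter spans a full machine word rather than 32 bits and cannot realistically be overflowed by reference holders. A minimal userspace sketch of the width difference, using plain int and long as stand-ins for the counters wrapped by the kernel atomic types (an assumption for illustration only; the real types are structs accessed through the atomic_*() helpers):

#include <limits.h>
#include <stdio.h>

/* Illustration only: plain int and long stand in for the counters wrapped
 * by atomic_t and atomic_long_t (an assumption; the kernel types are
 * structs accessed through the atomic_*() helpers). */
int main(void)
{
	printf("atomic_t-style counter:      %zu bits, max %d\n",
	       sizeof(int) * CHAR_BIT, INT_MAX);
	printf("atomic_long_t-style counter: %zu bits, max %ld\n",
	       sizeof(long) * CHAR_BIT, LONG_MAX);
	return 0;
}

On an LP64 system this prints 32 bits versus 64 bits, which is the headroom the conversion below buys.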
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 012f065ac8e..d4ed6000147 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
 	if (ioc == NULL)
 		return 1;
 
-	BUG_ON(atomic_read(&ioc->refcount) == 0);
+	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 
-	if (atomic_dec_and_test(&ioc->refcount)) {
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
-		atomic_set(&ret->refcount, 1);
+		atomic_long_set(&ret->refcount, 1);
 		atomic_set(&ret->nr_tasks, 1);
 		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 		ret = current_io_context(gfp_flags, node);
 		if (unlikely(!ret))
 			break;
-	} while (!atomic_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ret->refcount));
 
 	return ret;
 }
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 	struct io_context *dst = *pdst;
 
 	if (src) {
-		BUG_ON(atomic_read(&src->refcount) == 0);
-		atomic_inc(&src->refcount);
+		BUG_ON(atomic_long_read(&src->refcount) == 0);
+		atomic_long_inc(&src->refcount);
 		put_io_context(dst);
 		*pdst = src;
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 99ac4304d71..ef2f72d4243 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (!cfqd->active_cic) {
 		struct cfq_io_context *cic = RQ_CIC(rq);
 
-		atomic_inc(&cic->ioc->refcount);
+		atomic_long_inc(&cic->ioc->refcount);
 		cfqd->active_cic = cic;
 	}
 }
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf8..dd05434fa45 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
  * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
-	atomic_t refcount;
+	atomic_long_t refcount;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
 	 * a race).
 	 */
-	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
+	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+		atomic_long_inc(&ioc->refcount);
 		return ioc;
 	}
 
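The helpers touched above follow the usual refcount pattern: a reference is taken only while the count is still non-zero (atomic_long_inc_not_zero() in get_io_context() and ioc_task_link()), and whoever drops the count to zero frees the object (atomic_long_dec_and_test() in put_io_context()). Below is a minimal userspace sketch of that pattern using C11 <stdatomic.h> rather than the kernel API; struct obj, obj_get and obj_put are hypothetical names for illustration only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct io_context: only the refcount matters here. */
struct obj {
	atomic_long refcount;
};

/* Mirrors the atomic_long_inc_not_zero() idea: take a reference only while
 * the count is still non-zero, so an object already on its way to being
 * freed is never resurrected. */
static bool obj_get(struct obj *o)
{
	long old = atomic_load(&o->refcount);

	while (old != 0) {
		/* on failure the CAS reloads 'old' with the current value */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;
	}
	return false;
}

/* Mirrors atomic_long_dec_and_test(): whoever drops the count to zero
 * is responsible for freeing the object. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 1);	/* like alloc_io_context()    */
	if (obj_get(o))			/* like get_io_context()      */
		obj_put(o);		/* drop the extra reference   */
	obj_put(o);			/* drop the initial reference */
	return 0;
}

The compare-and-swap loop in obj_get() is one common way to express inc-not-zero outside the kernel, much as the kernel's atomic_inc_not_zero() is built on atomic_add_unless().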