 block/blk-ioc.c           | 36 +++++++++++++++++++++++++-----------
 block/cfq-iosched.c       |  4 ++--
 include/linux/iocontext.h | 22 ++++++++++++++++++++--
 3 files changed, 47 insertions(+), 15 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 10928740b5da..439ec21fd787 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -149,20 +149,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;
 
-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
@@ -191,6 +191,20 @@ retry:
 	put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
@@ -223,7 +237,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);
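
The kernel-doc above spells out the new pairing: get_io_context_active() bumps both refcount and active_ref, and put_io_context_active() undoes both, since its final put_io_context() drops the matching plain reference. A minimal sketch of a balanced caller, with a hypothetical hold_and_release() helper; only the ioc API calls are from this patch:

static void hold_and_release(struct io_context *ioc)
{
	/* takes one plain reference (refcount) and one active_ref */
	get_io_context_active(ioc);

	/* ... the ioc may issue new IOs while active_ref is held ... */

	/*
	 * drops active_ref; when it reaches zero the icqs are exited and
	 * the paired plain reference is dropped via put_io_context()
	 */
	put_io_context_active(ioc);
}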
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e386d9bcb79..9a4eac490e0b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1865,7 +1865,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
 		return;
 
 	/*
@@ -2841,7 +2841,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
 		enable_idle = 0;
-	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 		 !cfqd->cfq_slice_idle ||
 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
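
Both CFQ hunks test the same condition: idling a queue only pays off while some task behind the ioc can still submit IO, which is now exactly what active_ref tracks. A hypothetical helper, not part of the patch, that names the predicate both call sites open-code:

/*
 * true while at least one active reference remains, i.e. while the
 * ioc may still issue new IOs worth idling for
 */
static inline bool ioc_may_issue_io(struct io_context *ioc)
{
	return atomic_read(&ioc->active_ref) != 0;
}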
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 81a8870ac224..6f1a2608e91f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -100,6 +100,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,17 +121,34 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline void ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
 	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
-	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
 	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
+
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
 	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
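
ioc_task_link() is the fork-time hook: after this patch it first takes an active reference and then bumps nr_tasks, so a newly linked task pins all three counts at once. A simplified sketch of the CLONE_IO sharing path, modelled on copy_io() in kernel/fork.c (error handling and the non-sharing branch elided):

static int copy_io_sketch(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	if (ioc && (clone_flags & CLONE_IO)) {
		/*
		 * share the parent's ioc: the child's link pins
		 * refcount, active_ref and nr_tasks in one call
		 */
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	}
	return 0;
}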