author     Tejun Heo <tj@kernel.org>       2011-12-13 18:33:40 -0500
committer  Jens Axboe <axboe@kernel.dk>    2011-12-13 18:33:40 -0500
commit     f2dbd76a0a994bc1d5a3d0e7c844cc373832e86c (patch)
tree       cae6a4333ee6e5eb76ef133dfdee95e1943c0ab1 /block/blk.h
parent     1238033c79e92e5c315af12e45396f1a78c73dec (diff)
block, cfq: replace current_io_context() with create_io_context()
When called under queue_lock, current_io_context() triggers a lockdep warning if it hits the allocation path. This is because io_context installation is protected by task_lock, which is not IRQ safe, so lockdep reports an irq-unsafe-lock -> irq -> irq-safe-lock -> irq-unsafe-lock deadlock scenario.

Given that restriction, an accessor and creator rolled into one doesn't work well. Drop current_io_context() and let users access task->io_context directly inside queue_lock, combined with explicit creation using create_io_context(). Future ioc updates will further consolidate ioc access, and the create interface will be unexported.

While at it, relocate the ioc internal interface declarations in blk.h and add section comments before and after.

This patch does not introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
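To make the inversion concrete, here is an illustrative schematic of the ordering lockdep complains about; it is a sketch of the scenario described in the changelog, not output from an actual run:

/*
 * Illustrative lock-ordering sketch (not code from this patch).
 *
 * queue_lock is IRQ-safe (acquired with spin_lock_irq()), while
 * task_lock(), which protects task->io_context installation, is a
 * plain IRQ-unsafe spinlock.
 *
 * CPU0: old current_io_context() allocation path
 *	spin_lock_irq(q->queue_lock);	// IRQ-safe lock held
 *	task_lock(current);		// IRQ-unsafe lock nested inside
 *
 * CPU1: any other task_lock() user
 *	task_lock(tsk);			// IRQ-unsafe lock held...
 *	<IRQ arrives>
 *	    spin_lock(q->queue_lock);	// ...IRQ context wants queue_lock
 *
 * CPU1's IRQ handler spins on queue_lock, which CPU0 holds while it
 * waits for task_lock, which CPU1 holds: the irq-unsafe-lock -> irq ->
 * irq-safe-lock -> irq-unsafe-lock chain named in the changelog.
 */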
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h  36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 8d421156fefb..5bca2668e1bf 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -127,9 +127,6 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
-void get_io_context(struct io_context *ioc);
-struct io_context *current_io_context(gfp_t gfp_flags, int node);
-
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
@@ -198,6 +195,39 @@ static inline int blk_do_io_stat(struct request *rq)
 	 (rq->cmd_flags & REQ_DISCARD));
 }
 
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+				int node);
+
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+						   gfp_t gfp_mask, int node)
+{
+	WARN_ON_ONCE(irqs_disabled());
+	if (unlikely(!task->io_context))
+		create_io_context_slowpath(task, gfp_mask, node);
+	return task->io_context;
+}
+
+/*
+ * Internal throttling interface
+ */
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
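For reference, a caller following the new convention would look roughly like this. The function name and the surrounding request-allocation logic are hypothetical, sketched from the create_io_context() kernel-doc above rather than copied from the patch:

/*
 * Hypothetical caller (not from this patch): ensure current has an
 * io_context before proceeding, per the create_io_context() contract.
 */
static struct request *example_alloc_request(struct request_queue *q,
					     gfp_t gfp_mask)
{
	struct request *rq = NULL;

	spin_lock_irq(q->queue_lock);

	if (!current->io_context) {
		/*
		 * create_io_context() may take the IRQ-unsafe task_lock,
		 * so queue_lock must be dropped (re-enabling IRQs)
		 * around the call.
		 */
		spin_unlock_irq(q->queue_lock);
		create_io_context(current, gfp_mask, q->node);
		spin_lock_irq(q->queue_lock);

		/* may still be NULL if the allocation failed */
		if (!current->io_context)
			goto out_unlock;
	}

	/* ... current->io_context is now valid under queue_lock ... */

out_unlock:
	spin_unlock_irq(q->queue_lock);
	return rq;
}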