Diffstat (limited to 'block/blk.h')
-rw-r--r--	block/blk.h	32

1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index d45be871329e..85f6ae42f7d3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
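
The first hunk replaces the one-shot blk_drain_queue() with a start/end pair. A minimal sketch of how a caller might bracket queue maintenance with the new interface; the function example_update_queue() and the pairing discipline shown are assumptions based on the names, not spelled out in this header:

/* Hypothetical caller: enter bypass mode, mutate queue state, leave. */
static void example_update_queue(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* assumed: drain and begin bypassing */
	/* ... elevator/queue state presumably safe to change here ... */
	blk_queue_bypass_end(q);	/* assumed: resume normal dispatch */
}

The split suggests callers now mark an explicit bypass window rather than passing a drain_all flag for a single drain pass.
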
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 
 /*
  * Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it. Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						   gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }
 
 /*
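
This hunk drops the task argument from create_io_context(), which now always operates on %current, and renames the slow path to create_task_io_context(), returning int. A hedged example of what a call site could look like after this change; example_prepare() and its error handling are illustrative, not taken from the patch:

/* Illustrative call site: ensure %current has an io_context before use.
 * Must not be called with IRQs disabled; task_lock is IRQ-unsafe. */
static int example_prepare(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_context *ioc;

	ioc = create_io_context(gfp_mask, q->node);
	if (unlikely(!ioc))
		return -ENOMEM;		/* allocation failed, ioc is NULL */

	get_io_context(ioc);		/* take a reference for our use */
	/* ... e.g. look up or create an icq, then submit I/O ... */
	return 0;
}

Note that create_io_context() itself still returns current->io_context rather than an error code; the int return value of create_task_io_context() is only visible to callers of the slow path.
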
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
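
The last two hunks remove blk_throtl_release() from both branches of the CONFIG_BLK_DEV_THROTTLING conditional: the extern declaration and its empty inline stub. The stub pattern visible in the diff is what lets callers invoke the remaining throttling functions unconditionally; a small illustration, with example_teardown() being hypothetical:

/* Hypothetical caller: compiles whether or not throttling is configured,
 * because the #else branch supplies empty inline stubs. */
static void example_teardown(struct request_queue *q)
{
	blk_throtl_drain(q);	/* no-op when CONFIG_BLK_DEV_THROTTLING is off */
	blk_throtl_exit(q);
}
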