author	Ming Lei <tom.leiming@gmail.com>	2014-05-31 12:43:36 -0400
committer	Jens Axboe <axboe@fb.com>	2014-06-03 23:04:38 -0400
commit	1aecfe4887713838c79bc52f774609a57db4f988 (patch)
tree	f2b2baf54092829ab1fc3d97087ac6b45b89cc04 /block
parent	3de0ef8d0d3350964720cad2a0a72984f1bb81ba (diff)
blk-mq: move blk_mq_get_ctx/blk_mq_put_ctx to mq private header
The blk-mq tag code needs these helpers.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	22
-rw-r--r--	block/blk-mq.h	22
2 files changed, 22 insertions, 22 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0f5879c42dcd..b9230c522c6b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-					   unsigned int cpu)
-{
-	return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
-}
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
diff --git a/block/blk-mq.h b/block/blk-mq.h
index de7b3bbd5bd6..57a7968e47b3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,4 +69,26 @@ struct blk_align_bitmap {
 	unsigned long depth;
 } ____cacheline_aligned_in_smp;
 
+static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+					   unsigned int cpu)
+{
+	return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queueing queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+	return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+	put_cpu();
+}
+
 #endif
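
Usage note (not part of the commit): blk_mq_get_ctx() pins the caller to the current CPU via get_cpu(), which disables preemption, so every call must be balanced by blk_mq_put_ctx(), which calls put_cpu(). A minimal sketch of how a caller such as the tag code might pair the helpers; the function example_tag_op is hypothetical, for illustration only:

static void example_tag_op(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;

	/* get_cpu() inside disables preemption; ctx is this CPU's
	 * software queue and stays valid until the matching put. */
	ctx = blk_mq_get_ctx(q);

	/* ... per-cpu work against ctx, e.g. tag bookkeeping ... */

	/* put_cpu() inside re-enables preemption */
	blk_mq_put_ctx(ctx);
}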