Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 22 ----------------------
 1 file changed, 0 insertions(+), 22 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0f5879c42dcd..b9230c522c6b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-					   unsigned int cpu)
-{
-	return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
-}
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
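
Context for the removal above: the diffstat is filtered to block/blk-mq.c, so only the deletion is visible here; the rest of the commit (not shown) presumably re-adds these helpers elsewhere, such as a private header. The removed comment describes the classic get_cpu()/put_cpu() pairing: get_cpu() disables preemption and returns the current CPU id, so the per-CPU ctx pointer stays stable until the matching put_cpu(). The sketch below illustrates the pairing a caller is expected to follow; example_queue_work() and its body are hypothetical, while blk_mq_get_ctx()/blk_mq_put_ctx() and the struct names come from the diff above.

/*
 * Hypothetical caller showing the acquire/release discipline of the
 * removed helpers. Between get_cpu() and put_cpu() the task cannot be
 * preempted or migrated, so the per-CPU software queue context
 * returned by blk_mq_get_ctx() remains the current CPU's ctx.
 */
static void example_queue_work(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);	/* preemption off */

	/* ... insert a request on ctx's software queue ... */

	blk_mq_put_ctx(ctx);				/* preemption back on */
}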