author	Tejun Heo <tj@kernel.org>	2011-12-13 18:33:37 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:37 -0500
commit	1ba64edef6051d2ec79bb2fbd3a0c8f0df00ab55 (patch)
tree	1e8a8d5cfdfeff0f92490985bd125ab6666673b0 /block
parent	dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 (diff)
block, sx8: kill blk_insert_request()
The only user left for blk_insert_request() is sx8 and it can be trivially switched to use blk_execute_rq_nowait() - special requests aren't included in io stat and sx8 doesn't use block layer tagging. Switch sx8 and kill blk_insert_request().

This patch doesn't introduce any functional difference.

Only compile tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
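For context, the companion sx8 change open-codes what blk_insert_request() used to do before handing the request to blk_execute_rq_nowait(). The sketch below shows that conversion pattern for a generic caller; submit_special_request(), q, rq and priv are placeholder names rather than sx8 identifiers, and it assumes the pre-blk-mq API of this era (REQ_TYPE_SPECIAL, blk_execute_rq_nowait()).

#include <linux/blkdev.h>

/*
 * Old call being removed by this series:
 *
 *	blk_insert_request(q, rq, 1, priv);
 *
 * New pattern: the caller marks the request special, attaches its
 * payload and queues it asynchronously itself.
 */
static void submit_special_request(struct request_queue *q,
				   struct request *rq, void *priv)
{
	rq->cmd_type = REQ_TYPE_SPECIAL;	/* no merging, not in io stats */
	rq->special = priv;			/* driver-private payload */

	/*
	 * at_head = 1 mirrors the old at_head argument; a NULL gendisk and
	 * a NULL completion callback keep the default completion behaviour.
	 */
	blk_execute_rq_nowait(q, NULL, rq, 1, NULL);
}

Unlike blk_insert_request(), this path does not release a block-layer tag, which is fine here because, as the commit message notes, sx8 doesn't use block layer tagging.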
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	48
1 file changed, 0 insertions, 48 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd3..435af2378614 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1010,54 +1010,6 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 	__elv_add_request(q, rq, where);
 }
 
-/**
- * blk_insert_request - insert a special request into a request queue
- * @q:		request queue where request should be inserted
- * @rq:		request to be inserted
- * @at_head:	insert request at head or tail of queue
- * @data:	private data
- *
- * Description:
- *    Many block devices need to execute commands asynchronously, so they don't
- *    block the whole kernel from preemption during request execution.  This is
- *    accomplished normally by inserting aritficial requests tagged as
- *    REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
- *    be scheduled for actual execution by the request queue.
- *
- *    We have the option of inserting the head or the tail of the queue.
- *    Typically we use the tail for new ioctls and so forth.  We use the head
- *    of the queue for things like a QUEUE_FULL message from a device, or a
- *    host that is unable to accept a particular command.
- */
-void blk_insert_request(struct request_queue *q, struct request *rq,
-			int at_head, void *data)
-{
-	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-	unsigned long flags;
-
-	/*
-	 * tell I/O scheduler that this isn't a regular read/write (ie it
-	 * must not attempt merges on this) and that it acts as a soft
-	 * barrier
-	 */
-	rq->cmd_type = REQ_TYPE_SPECIAL;
-
-	rq->special = data;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	/*
-	 * If command is tagged, release the tag
-	 */
-	if (blk_rq_tagged(rq))
-		blk_queue_end_tag(q, rq);
-
-	add_acct_request(q, rq, where);
-	__blk_run_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_insert_request);
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {